Compare commits
60 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 4413c311e9 | |
| | 35a1357203 | |
| | f795be4274 | |
| | 4fd6254e93 | |
| | aa95dc534d | |
| | 3643d50f1a | |
| | 8b9f9f0a74 | |
| | 271a574f46 | |
| | 3787541733 | |
| | dd632d05e3 | |
| | 3c02a8ab23 | |
| | 907d676d39 | |
| | 4a96a9675c | |
| | 29007a7a43 | |
| | 275adaddc3 | |
| | 06e3dae085 | |
| | d04144933d | |
| | 411aa946ee | |
| | 2a8f455356 | |
| | f7cdbcb4d4 | |
| | 00c78c88d2 | |
| | 471a0f90b5 | |
| | 5b9b55c24d | |
| | 75aba0413f | |
| | 448831f72b | |
| | 5818b372a3 | |
| | f7456dde5e | |
| | 0ab841674c | |
| | a51e980608 | |
| | 94e5bbdb8a | |
| | d06426775b | |
| | 3d31925216 | |
| | 04e54c6f6e | |
| | ca97cdbfba | |
| | c20e6c2637 | |
| | 25308c2392 | |
| | 4437771a12 | |
| | d52b5d7d0e | |
| | 307c2a4cd4 | |
| | 978f6021de | |
| | 48a6f28e59 | |
| | 77d66a0bba | |
| | 849586d577 | |
| | f9cadcca3d | |
| | e1a5b52d07 | |
| | 1039ea153d | |
| | c5174b402c | |
| | 11d8666a4d | |
| | 78e8185439 | |
| | 806381a992 | |
| | 6db5512b84 | |
| | 59026f3be5 | |
| | 48910c8da2 | |
| | f812570a23 | |
| | ba24c5135c | |
| | b2fd9561df | |
| | 13855ba7fc | |
| | 6fc77f847c | |
| | 3b30e8723c | |
| | cb51b7e0ab | |
.gitignore (vendored): 2 changes
@@ -67,7 +67,7 @@ bower_components/
.flooignore

#config file
librechat.yaml
#librechat.yaml
librechat.yml

# Environment
deploy-compose.swarm.yml: new file, 259 lines
@@ -0,0 +1,259 @@
version: "3.8"

services:
  api:
    # build:
    # context: .
    # dockerfile: Dockerfile.multi
    # target: api-build
    image: ghcr.io/danny-avila/librechat-dev-api:latest
    # ports:
    # - 3080:3080
    # Note: depends_on is ignored in Docker Swarm mode
    # Services start in parallel, so API must handle connection retries
    # depends_on:
    # - mongodb
    # - rag_api
    networks:
      - net
    extra_hosts:
      - "host.docker.internal:host-gateway"
    env_file:
      - stack.env
    environment:
      - HOST=0.0.0.0
      - NODE_ENV=production
      - MONGO_URI=mongodb://mongodb:27017/LibreChat
      - MEILI_HOST=http://meilisearch:7700
      - RAG_PORT=${RAG_PORT:-8000}
      - RAG_API_URL=http://rag_api:${RAG_PORT:-8000}
    volumes:
      - type: bind
        source: /home/trav/dkr/LibreChat/librechat.yaml
        target: /app/librechat.yaml
      - /home/trav/dkr/LibreChat/images:/app/client/public/images
      - /home/trav/dkr/LibreChat/uploads:/app/uploads
      - /home/trav/dkr/LibreChat/logs:/app/api/logs
      - /home/trav/claude-scripts:/mnt/claude-scripts
      - /home/trav/dkr:/mnt/dkr
      - /home/trav/biz-bud:/mnt/biz-bud
      - /home/trav/portainer:/mnt/portainer
      - /home/trav/repos:/mnt/repos
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:3080/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 20s
        max_attempts: 10
        window: 2m
      update_config:
        parallelism: 1
        delay: 20s
        failure_action: rollback
      placement:
        constraints:
          - node.hostname == little

  client:
    image: nginx:1.27.0-alpine
    # ports:
    # - 80:80
    # - 443:443
    # Note: depends_on is ignored in Docker Swarm mode
    # depends_on:
    # - api
    networks:
      - net
      - badge-net
    volumes:
      - /home/trav/dkr/LibreChat/client/nginx.conf:/etc/nginx/conf.d/default.conf
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 15s
        max_attempts: 10
        window: 2m
      update_config:
        parallelism: 1
        delay: 15s
        failure_action: rollback
      placement:
        constraints:
          - node.hostname == little

  mongodb:
    # ports: # Uncomment this to access mongodb from outside docker, not safe in deployment
    # - 27018:27017
    image: mongo
    networks:
      - net
    volumes:
      - librechat-mongodb:/data/db
    command: mongod --noauth
    healthcheck:
      test: ["CMD-SHELL", "mongosh --eval 'db.adminCommand(\"ping\")' --quiet || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 20s
        max_attempts: 15
        window: 3m
      update_config:
        parallelism: 1
        delay: 20s
        failure_action: rollback
      placement:
        constraints:
          - node.hostname == little

  meilisearch:
    image: getmeili/meilisearch:v1.12.3
    networks:
      - net
    # ports: # Uncomment this to access meilisearch from outside docker
    # - 7700:7700 # if exposing these ports, make sure your master key is not the default value
    env_file:
      - stack.env
    environment:
      - MEILI_HOST=http://meilisearch:7700
      - MEILI_NO_ANALYTICS=true
    volumes:
      - librechat-meili_data:/meili_data
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:7700/health || wget --spider -q http://localhost:7700/health || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 20s
        max_attempts: 15
        window: 3m
      update_config:
        parallelism: 1
        delay: 20s
        failure_action: rollback
      placement:
        constraints:
          - node.hostname == little

  vectordb:
    image: pgvector/pgvector:0.8.0-pg15-trixie
    environment:
      POSTGRES_DB: mydatabase
      POSTGRES_USER: myuser
      POSTGRES_PASSWORD: mypassword
    networks:
      - net
    volumes:
      - librechat-pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U myuser -d mydatabase"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 20s
        max_attempts: 15
        window: 3m
      update_config:
        parallelism: 1
        delay: 20s
        failure_action: rollback
      placement:
        constraints:
          - node.hostname == little

  rag_api:
    image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:latest
    environment:
      - DB_HOST=vectordb
      - RAG_PORT=${RAG_PORT:-8000}
    networks:
      - net
    # Note: depends_on is ignored in Docker Swarm mode
    # depends_on:
    # - vectordb
    env_file:
      - stack.env
    healthcheck:
      test: ["CMD-SHELL", "python3 -c \"import urllib.request; urllib.request.urlopen('http://localhost:8000/health')\""]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 40s
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 20s
        max_attempts: 15
        window: 3m
      update_config:
        parallelism: 1
        delay: 20s
        failure_action: rollback
      placement:
        constraints:
          - node.hostname == little

  metrics:
    image: ghcr.io/virtuos/librechat_exporter:main
    # Note: depends_on is ignored in Docker Swarm mode
    # depends_on:
    # - mongodb
    # ports:
    # - "8000:8000"
    networks:
      - net
      - observability_observability
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 15s
        max_attempts: 10
        window: 2m
      update_config:
        parallelism: 1
        delay: 15s
        failure_action: rollback
      placement:
        constraints:
          - node.hostname == little

volumes:
  librechat-pgdata:
    name: librechat-pgdata
  librechat-mongodb:
    name: librechat-mongodb
  librechat-meili_data:
    name: librechat-meili_data

networks:
  net:
    driver: overlay
    attachable: true
  badge-net:
    external: true
  observability_observability:
    external: true
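Because depends_on is ignored here, the swarm scheduler starts all services in parallel, and the two external networks (badge-net, observability_observability) must already exist on the swarm. A minimal deployment sketch, assuming the stack is named librechat (the name is an assumption, not taken from this compare):

docker stack deploy -c deploy-compose.swarm.yml librechat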
@@ -3,7 +3,7 @@

services:
  api:
    container_name: LibreChat
    container_name: librechat
    ports:
      - "${PORT}:${PORT}"
    depends_on:
@@ -24,17 +24,36 @@ services:
      - type: bind
        source: ./.env
        target: /app/.env
      - ./librechat.yaml:/app/librechat.yaml
      - ./images:/app/client/public/images
      - ./uploads:/app/uploads
      - ./logs:/app/logs
    networks:
      - chat-net
  client:
    image: nginx:1.27.0-alpine
    container_name: librechat-nginx
    expose:
      - 80
      - 443
    depends_on:
      - api
    networks:
      - chat-net
      - edge-little
    restart: always
    volumes:
      - ./client/nginx.conf:/etc/nginx/conf.d/default.conf
  mongodb:
    container_name: chat-mongodb
    container_name: librechat-mongodb
    image: mongo
    restart: always
    user: "${UID}:${GID}"
    volumes:
      - ./data-node:/data/db
    command: mongod --noauth
    networks:
      - chat-net
  meilisearch:
    container_name: chat-meilisearch
    image: getmeili/meilisearch:v1.12.3
@@ -46,8 +65,10 @@ services:
      - MEILI_MASTER_KEY=${MEILI_MASTER_KEY}
    volumes:
      - ./meili_data_v1.12:/meili_data
    networks:
      - chat-net
  vectordb:
    container_name: vectordb
    container_name: librechat-vectordb
    image: pgvector/pgvector:0.8.0-pg15-trixie
    environment:
      POSTGRES_DB: mydatabase
@@ -55,9 +76,11 @@ services:
      POSTGRES_PASSWORD: mypassword
    restart: always
    volumes:
      - pgdata2:/var/lib/postgresql/data
      - librechat-pgdata:/var/lib/postgresql/data
    networks:
      - chat-net
  rag_api:
    container_name: rag_api
    container_name: librechat-rag_api
    image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:latest
    environment:
      - DB_HOST=vectordb
@@ -65,8 +88,17 @@ services:
    restart: always
    depends_on:
      - vectordb
    networks:
      - chat-net
    env_file:
      - .env

volumes:
  pgdata2:
  librechat-pgdata:
    external: true
networks:
  chat-net:
    driver: bridge
    name: chat-net
  edge-little:
    external: true
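In the updated compose file the librechat-pgdata volume is declared external, so it has to exist before the services are brought up. A minimal sketch, assuming the default local volume driver:

docker volume create librechat-pgdata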
librechat.yaml: new file, 584 lines
@@ -0,0 +1,584 @@
|
||||
# For more information, see the Configuration Guide:
|
||||
# https://www.librechat.ai/docs/configuration/librechat_yaml
|
||||
|
||||
# Configuration version (required)
|
||||
version: 1.3.1
|
||||
|
||||
# Cache settings: Set to true to enable caching
|
||||
cache: true
|
||||
|
||||
# File storage configuration
|
||||
# Single strategy for all file types (legacy format, still supported)
|
||||
fileStrategy: "s3"
|
||||
|
||||
# Granular file storage strategies (new format - recommended)
|
||||
# Allows different storage strategies for different file types
|
||||
# fileStrategy:
|
||||
# avatar: "s3" # Storage for user/agent avatar images
|
||||
# image: "firebase" # Storage for uploaded images in chats
|
||||
# document: "local" # Storage for document uploads (PDFs, text files, etc.)
|
||||
|
||||
# Available strategies: "local", "s3", "firebase"
|
||||
# If not specified, defaults to "local" for all file types
|
||||
# You can mix and match strategies based on your needs:
|
||||
# - Use S3 for avatars for fast global access
|
||||
# - Use Firebase for images with automatic optimization
|
||||
# - Use local storage for documents for privacy/compliance
|
||||
|
||||
ocr:
|
||||
apiKey: "YO2bXkUHLxlJdsXactjlLK4PRZMrBaCo"
|
||||
strategy: "mistral_ocr"
|
||||
mistralModel: "mistral-ocr-latest"
|
||||
|
||||
# Custom interface configuration
|
||||
interface:
|
||||
customWelcome: 'Welcome to LibreChat! Enjoy your experience.'
|
||||
# Enable/disable file search as a chatarea selection (default: true)
|
||||
# Note: This setting does not disable the Agents File Search Capability.
|
||||
# To disable the Agents Capability, see the Agents Endpoint configuration instead.
|
||||
fileSearch: true
|
||||
# Privacy policy settings
|
||||
privacyPolicy:
|
||||
externalUrl: 'https://librechat.ai/privacy-policy'
|
||||
openNewTab: true
|
||||
|
||||
# Terms of service
|
||||
termsOfService:
|
||||
externalUrl: 'https://librechat.ai/tos'
|
||||
openNewTab: true
|
||||
modalAcceptance: true
|
||||
modalTitle: 'Terms of Service for LibreChat'
|
||||
modalContent: |
|
||||
# Terms and Conditions for LibreChat
|
||||
|
||||
*Effective Date: February 18, 2024*
|
||||
|
||||
Welcome to LibreChat, the informational website for the open-source AI chat platform, available at https://librechat.ai. These Terms of Service ("Terms") govern your use of our website and the services we offer. By accessing or using the Website, you agree to be bound by these Terms and our Privacy Policy, accessible at https://librechat.ai//privacy.
|
||||
|
||||
## 1. Ownership
|
||||
|
||||
Upon purchasing a package from LibreChat, you are granted the right to download and use the code for accessing an admin panel for LibreChat. While you own the downloaded code, you are expressly prohibited from reselling, redistributing, or otherwise transferring the code to third parties without explicit permission from LibreChat.
|
||||
|
||||
## 2. User Data
|
||||
|
||||
We collect personal data, such as your name, email address, and payment information, as described in our Privacy Policy. This information is collected to provide and improve our services, process transactions, and communicate with you.
|
||||
|
||||
## 3. Non-Personal Data Collection
|
||||
|
||||
The Website uses cookies to enhance user experience, analyze site usage, and facilitate certain functionalities. By using the Website, you consent to the use of cookies in accordance with our Privacy Policy.
|
||||
|
||||
## 4. Use of the Website
|
||||
|
||||
You agree to use the Website only for lawful purposes and in a manner that does not infringe the rights of, restrict, or inhibit anyone else's use and enjoyment of the Website. Prohibited behavior includes harassing or causing distress or inconvenience to any person, transmitting obscene or offensive content, or disrupting the normal flow of dialogue within the Website.
|
||||
|
||||
## 5. Governing Law
|
||||
|
||||
These Terms shall be governed by and construed in accordance with the laws of the United States, without giving effect to any principles of conflicts of law.
|
||||
|
||||
## 6. Changes to the Terms
|
||||
|
||||
We reserve the right to modify these Terms at any time. We will notify users of any changes by email. Your continued use of the Website after such changes have been notified will constitute your consent to such changes.
|
||||
|
||||
## 7. Contact Information
|
||||
|
||||
If you have any questions about these Terms, please contact us at contact@librechat.ai.
|
||||
|
||||
By using the Website, you acknowledge that you have read these Terms of Service and agree to be bound by them.
|
||||
|
||||
modelSelect: true
|
||||
parameters: true
|
||||
sidePanel: true
|
||||
presets: true
|
||||
prompts: true
|
||||
bookmarks: true
|
||||
multiConvo: true
|
||||
agents: true
|
||||
peoplePicker:
|
||||
users: true
|
||||
groups: true
|
||||
roles: true
|
||||
marketplace:
|
||||
use: true
|
||||
fileCitations: true
|
||||
# Temporary chat retention period in hours (default: 720, min: 1, max: 8760)
|
||||
# temporaryChatRetention: 1
|
||||
|
||||
# Example Cloudflare turnstile (optional)
|
||||
#turnstile:
|
||||
# siteKey: "your-site-key-here"
|
||||
# options:
|
||||
# language: "auto" # "auto" or an ISO 639-1 language code (e.g. en)
|
||||
# size: "normal" # Options: "normal", "compact", "flexible", or "invisible"
|
||||
|
||||
# Example Registration Object Structure (optional)
|
||||
registration:
|
||||
socialLogins: ['openid']
|
||||
# allowedDomains:
|
||||
# - "gmail.com"
|
||||
|
||||
# Example Balance settings
|
||||
# balance:
|
||||
# enabled: false
|
||||
# startBalance: 20000
|
||||
# autoRefillEnabled: false
|
||||
# refillIntervalValue: 30
|
||||
# refillIntervalUnit: 'days'
|
||||
# refillAmount: 10000
|
||||
|
||||
# Example Transactions settings
|
||||
# Controls whether to save transaction records to the database
|
||||
# Default is true (enabled)
|
||||
transactions:
|
||||
enabled: true
|
||||
# Note: If balance.enabled is true, transactions will always be enabled
|
||||
# regardless of this setting to ensure balance tracking works correctly
|
||||
|
||||
speech:
|
||||
speechTab:
|
||||
conversationMode: true
|
||||
advancedMode: true
|
||||
speechToText:
|
||||
engineSTT: "external"
|
||||
languageSTT: "English (US)"
|
||||
autoTranscribeAudio: true
|
||||
decibelValue: -45
|
||||
autoSendText: 0
|
||||
textToSpeech:
|
||||
engineTTS: "external"
|
||||
voice: "alloy"
|
||||
languageTTS: "en"
|
||||
automaticPlayback: true
|
||||
playbackRate: 1.0
|
||||
cacheTTS: true
|
||||
tts:
|
||||
elevenlabs:
|
||||
apiKey: '${TTS_API_KEY}'
|
||||
model: 'eleven_multilingual_v2'
|
||||
voices: ['pNInz6obpgDQGcFmaJgB', 'EXAVITQu4vr4xnSDxMaL', 'JBFqnCBsd6RMkjVDRZzb', 'Xb7hH8MSUJpSbSDYk0k2']
|
||||
# Adam, Sarah, George, Alice
|
||||
stt:
|
||||
openai:
|
||||
apiKey: '${STT_API_KEY}'
|
||||
model: 'whisper-1'
|
||||
# rateLimits:
|
||||
# fileUploads:
|
||||
# ipMax: 100
|
||||
# ipWindowInMinutes: 60 # Rate limit window for file uploads per IP
|
||||
# userMax: 50
|
||||
# userWindowInMinutes: 60 # Rate limit window for file uploads per user
|
||||
# conversationsImport:
|
||||
# ipMax: 100
|
||||
# ipWindowInMinutes: 60 # Rate limit window for conversation imports per IP
|
||||
# userMax: 50
|
||||
# userWindowInMinutes: 60 # Rate limit window for conversation imports per user
|
||||
|
||||
# Example Actions Object Structure
|
||||
actions:
|
||||
allowedDomains:
|
||||
- 'swapi.dev'
|
||||
- 'librechat.ai'
|
||||
- 'google.com'
|
||||
- 'sidepiece.rip'
|
||||
- 'baked.rocks'
|
||||
- 'raindrop.com'
|
||||
- 'raindrop.services'
|
||||
|
||||
# Example MCP Servers Object Structure
|
||||
mcpServers:
|
||||
pieces:
|
||||
type: "streamable-http"
|
||||
url: https://pieces-mcp.baked.rocks/mcp
|
||||
timeout: 60000
|
||||
startup: false
|
||||
xpipe:
|
||||
type: "streamable-http"
|
||||
url: https://xpipe-mcp.baked.rocks/mcp
|
||||
timeout: 60000
|
||||
startup: false
|
||||
firecrawl:
|
||||
type: stdio
|
||||
command: npx
|
||||
args:
|
||||
- -y
|
||||
- firecrawl-mcp
|
||||
env:
|
||||
FIRECRAWL_API_KEY: dummy-key
|
||||
FIRECRAWL_API_URL: http://crawl.toy
|
||||
context7:
|
||||
type: "streamable-http"
|
||||
url: https://mcp.context7.com/mcp
|
||||
timeout: 60000
|
||||
startup: false
|
||||
headers:
|
||||
CONTEXT7_API_KEY: ctx7sk-f6f1b998-88a2-4e78-9d21-433545326e6c
|
||||
# everything:
|
||||
# # type: sse # type can optionally be omitted
|
||||
# url: http://localhost:3001/sse
|
||||
# timeout: 60000 # 1 minute timeout for this server, this is the default timeout for MCP servers.
|
||||
# puppeteer:
|
||||
# type: stdio
|
||||
# command: npx
|
||||
# args:
|
||||
# - -y
|
||||
# - "@modelcontextprotocol/server-puppeteer"
|
||||
# timeout: 300000 # 5 minutes timeout for this server
|
||||
filesystem:
|
||||
type: stdio
|
||||
command: npx
|
||||
args:
|
||||
- -y
|
||||
- "@modelcontextprotocol/server-filesystem"
|
||||
- /mnt/claude-scripts
|
||||
- /mnt/apps
|
||||
- /mnt/biz-bud
|
||||
- /mnt/portainer
|
||||
- /mnt/repos
|
||||
# iconPath: /app/client/public/assets/logo.svg # Fixed: use container path if logo exists
|
||||
# mcp-obsidian:
|
||||
# command: npx
|
||||
# args:
|
||||
# - -y
|
||||
# - "mcp-obsidian"
|
||||
# - /path/to/obsidian/vault
|
||||
sequential-thinking:
|
||||
url: https://server.smithery.ai/@smithery-ai/server-sequential-thinking/mcp
|
||||
timeout: 60000
|
||||
|
||||
# Definition of custom endpoints
|
||||
endpoints:
|
||||
assistants:
|
||||
disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
|
||||
pollIntervalMs: 3000 # Polling interval for checking assistant updates
|
||||
timeoutMs: 180000 # Timeout for assistant operations
|
||||
# # Should only be one or the other, either `supportedIds` or `excludedIds`
|
||||
# supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
||||
# # excludedIds: ["asst_excludedAssistantId"]
|
||||
# # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
|
||||
privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
|
||||
# # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
|
||||
retrievalModels: ["openai/gpt-5.1"]
|
||||
# # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
|
||||
capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
|
||||
agents:
|
||||
# # (optional) Default recursion depth for agents, defaults to 25
|
||||
recursionLimit: 50
|
||||
# # (optional) Max recursion depth for agents, defaults to 25
|
||||
maxRecursionLimit: 100
|
||||
# # (optional) Disable the builder interface for agents
|
||||
disableBuilder: false
|
||||
# # (optional) Maximum total citations to include in agent responses, defaults to 30
|
||||
maxCitations: 30
|
||||
# # (optional) Maximum citations per file to include in agent responses, defaults to 7
|
||||
maxCitationsPerFile: 7
|
||||
# # (optional) Minimum relevance score for sources to be included in responses, defaults to 0.45 (45% relevance threshold)
|
||||
# # Set to 0.0 to show all sources (no filtering), or higher like 0.7 for stricter filtering
|
||||
minRelevanceScore: 0.4
|
||||
# # (optional) Agent Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
|
||||
capabilities: ["execute_code", "file_search", "actions", "tools", "web_search", "ocr", "artifacts", "chain", "context"]
|
||||
allowedProviders:
|
||||
- litellm
|
||||
custom:
|
||||
# Groq Example
|
||||
- name: 'litellm'
|
||||
apiKey: 'sk-1234'
|
||||
baseURL: 'http://llm.toy'
|
||||
models:
|
||||
default:
|
||||
- 'claude-sonnet-4-5'
|
||||
- 'claude-opus-4-5'
|
||||
- 'claude-haiku-4-5'
|
||||
- 'rerank-v3.5'
|
||||
- 'deepgram/base'
|
||||
- 'deepgram/nova-3'
|
||||
- 'deepgram/nova-3-general'
|
||||
- 'deepgram/whisper'
|
||||
- 'deepgram/whisper-base'
|
||||
- 'deepgram/whisper-large'
|
||||
- 'deepgram/whisper-medium'
|
||||
- 'deepgram/whisper-small'
|
||||
- 'deepgram/whisper-tiny'
|
||||
- 'elevenlabs/scribe_v1'
|
||||
- 'fireworks_ai/glm-4p7'
|
||||
- 'gemini/imagen-4.0-ultra-generate-001'
|
||||
- 'ollama/gpt-oss:20b'
|
||||
- 'gpt-5.2'
|
||||
- 'gemini/gemini-3-pro-preview'
|
||||
- 'gemini/gemini-3-flash-preview'
|
||||
- 'gpt-realtime-mini'
|
||||
- 'text-embedding-3-large'
|
||||
- 'text-embedding-3-small'
|
||||
- 'fireworks_ai/deepseek-v3p2'
|
||||
- 'fireworks_ai/kimi-k2-instruct'
|
||||
- 'gpt-realtime'
|
||||
- 'tts-1'
|
||||
- 'tts-1-hd'
|
||||
- 'whisper-1'
|
||||
- 'fireworks_ai/qwen3-vl-235b-a22b-instruct'
|
||||
- 'fireworks_ai/gpt-oss-120b'
|
||||
- 'fireworks_ai/minimax-m2p1'
|
||||
- 'gemini/imagen-4.0-generate-001'
|
||||
- 'gemini/imagen-4.0-fast-generate-001'
|
||||
- 'fireworks_ai/glm-4p6'
|
||||
- 'fireworks_ai/kimi-k2-thinking'
|
||||
- 'fireworks_ai/qwen3-vl-235b-a22b-thinking'
|
||||
fetch: false
|
||||
titleConvo: true
|
||||
titleModel: 'fireworks_ai/gpt-oss-120b'
|
||||
modelDisplayLabel: 'LLM'
|
||||
# Summarize setting: Set to true to enable summarization.
|
||||
summarize: true
|
||||
# Summary Model: Specify the model to use if summarization is enabled.
|
||||
summaryModel: "fireworks_ai/qwen3-vl-235b-a22b-instruct" # Defaults to "gpt-3.5-turbo" if omitted.
|
||||
# Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
|
||||
forcePrompt: false
|
||||
dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
|
||||
|
||||
# - name: 'groq'
|
||||
# apiKey: '${GROQ_API_KEY}'
|
||||
# baseURL: 'https://api.groq.com/openai/v1/'
|
||||
# models:
|
||||
# default:
|
||||
# - 'llama3-70b-8192'
|
||||
# - 'llama3-8b-8192'
|
||||
# - 'llama2-70b-4096'
|
||||
# - 'mixtral-8x7b-32768'
|
||||
# - 'gemma-7b-it'
|
||||
# fetch: false
|
||||
# titleConvo: true
|
||||
# titleModel: 'mixtral-8x7b-32768'
|
||||
# modelDisplayLabel: 'groq'
|
||||
|
||||
# # Mistral AI Example
|
||||
# - name: 'Mistral' # Unique name for the endpoint
|
||||
# # For `apiKey` and `baseURL`, you can use environment variables that you define.
|
||||
# # recommended environment variables:
|
||||
# apiKey: '${MISTRAL_API_KEY}'
|
||||
# baseURL: 'https://api.mistral.ai/v1'
|
||||
|
||||
# # Models configuration
|
||||
# models:
|
||||
# # List of default models to use. At least one value is required.
|
||||
# default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
|
||||
# # Fetch option: Set to true to fetch models from API.
|
||||
# fetch: true # Defaults to false.
|
||||
|
||||
# # Optional configurations
|
||||
|
||||
# # Title Conversation setting
|
||||
# titleConvo: true # Set to true to enable title conversation
|
||||
|
||||
# # Title Method: Choose between "completion" or "functions".
|
||||
# # titleMethod: "completion" # Defaults to "completion" if omitted.
|
||||
|
||||
# # Title Model: Specify the model to use for titles.
|
||||
# titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.
|
||||
|
||||
# # Summarize setting: Set to true to enable summarization.
|
||||
# # summarize: false
|
||||
|
||||
# # Summary Model: Specify the model to use if summarization is enabled.
|
||||
# # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
|
||||
|
||||
# # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
|
||||
# # forcePrompt: false
|
||||
|
||||
# # The label displayed for the AI model in messages.
|
||||
# modelDisplayLabel: 'Mistral' # Default is "AI" when not set.
|
||||
|
||||
# # Add additional parameters to the request. Default params will be overwritten.
|
||||
# # addParams:
|
||||
# # safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
|
||||
|
||||
# # Drop Default params parameters from the request. See default params in guide linked below.
|
||||
# # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
|
||||
# dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
|
||||
|
||||
# # OpenRouter Example
|
||||
# - name: 'OpenRouter'
|
||||
# # For `apiKey` and `baseURL`, you can use environment variables that you define.
|
||||
# # recommended environment variables:
|
||||
# apiKey: '${OPENROUTER_KEY}'
|
||||
# baseURL: 'https://openrouter.ai/api/v1'
|
||||
# headers:
|
||||
# x-librechat-body-parentmessageid: '{{LIBRECHAT_BODY_PARENTMESSAGEID}}'
|
||||
# models:
|
||||
# default: ['meta-llama/llama-3-70b-instruct']
|
||||
# fetch: true
|
||||
# titleConvo: true
|
||||
# titleModel: 'meta-llama/llama-3-70b-instruct'
|
||||
# # Recommended: Drop the stop parameter from the request as Openrouter models use a variety of stop tokens.
|
||||
# dropParams: ['stop']
|
||||
# modelDisplayLabel: 'OpenRouter'
|
||||
|
||||
# # Helicone Example
|
||||
# - name: 'Helicone'
|
||||
# # For `apiKey` and `baseURL`, you can use environment variables that you define.
|
||||
# # recommended environment variables:
|
||||
# apiKey: '${HELICONE_KEY}'
|
||||
# baseURL: 'https://ai-gateway.helicone.ai'
|
||||
# headers:
|
||||
# x-librechat-body-parentmessageid: '{{LIBRECHAT_BODY_PARENTMESSAGEID}}'
|
||||
# models:
|
||||
# default: ['gpt-4o-mini', 'claude-4.5-sonnet', 'llama-3.1-8b-instruct', 'gemini-2.5-flash-lite']
|
||||
# fetch: true
|
||||
# titleConvo: true
|
||||
# titleModel: 'gpt-4o-mini'
|
||||
# modelDisplayLabel: 'Helicone'
|
||||
# iconURL: https://marketing-assets-helicone.s3.us-west-2.amazonaws.com/helicone.png
|
||||
|
||||
# # Portkey AI Example
|
||||
# - name: 'Portkey'
|
||||
# apiKey: 'dummy'
|
||||
# baseURL: 'https://api.portkey.ai/v1'
|
||||
# headers:
|
||||
# x-portkey-api-key: '${PORTKEY_API_KEY}'
|
||||
# x-portkey-virtual-key: '${PORTKEY_OPENAI_VIRTUAL_KEY}'
|
||||
# models:
|
||||
# default: ['gpt-4o-mini', 'gpt-4o', 'chatgpt-4o-latest']
|
||||
# fetch: true
|
||||
# titleConvo: true
|
||||
# titleModel: 'current_model'
|
||||
# summarize: false
|
||||
# summaryModel: 'current_model'
|
||||
# forcePrompt: false
|
||||
# modelDisplayLabel: 'Portkey'
|
||||
# iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf
|
||||
# Example modelSpecs configuration showing grouping options
|
||||
# The 'group' field organizes model specs in the UI selector:
|
||||
# - If 'group' matches an endpoint name (e.g., "openAI", "groq"), the spec appears nested under that endpoint
|
||||
# - If 'group' is a custom name (doesn't match any endpoint), it creates a separate collapsible section
|
||||
# - If 'group' is omitted, the spec appears as a standalone item at the top level
|
||||
# modelSpecs:
|
||||
# list:
|
||||
# # Example 1: Nested under an endpoint (grouped with openAI endpoint)
|
||||
# - name: "gpt-4o"
|
||||
# label: "GPT-4 Optimized"
|
||||
# description: "Most capable GPT-4 model with multimodal support"
|
||||
# group: "openAI" # String value matching the endpoint name
|
||||
# preset:
|
||||
# endpoint: "openAI"
|
||||
# model: "gpt-4o"
|
||||
#
|
||||
# # Example 2: Nested under a custom endpoint (grouped with groq endpoint)
|
||||
# - name: "llama3-70b-8192"
|
||||
# label: "Llama 3 70B"
|
||||
# description: "Fastest inference available - great for quick responses"
|
||||
# group: "groq" # String value matching your custom endpoint name from endpoints.custom
|
||||
# preset:
|
||||
# endpoint: "groq"
|
||||
# model: "llama3-70b-8192"
|
||||
#
|
||||
# # Example 3: Custom group (creates a separate collapsible section)
|
||||
# - name: "coding-assistant"
|
||||
# label: "Coding Assistant"
|
||||
# description: "Specialized for coding tasks"
|
||||
# group: "my-assistants" # Custom string - doesn't match any endpoint, so creates its own group
|
||||
# preset:
|
||||
# endpoint: "openAI"
|
||||
# model: "gpt-4o"
|
||||
# instructions: "You are an expert coding assistant..."
|
||||
# temperature: 0.3
|
||||
#
|
||||
# - name: "writing-assistant"
|
||||
# label: "Writing Assistant"
|
||||
# description: "Specialized for creative writing"
|
||||
# group: "my-assistants" # Same custom group name - both specs appear in same section
|
||||
# preset:
|
||||
# endpoint: "anthropic"
|
||||
# model: "claude-sonnet-4"
|
||||
# instructions: "You are a creative writing expert..."
|
||||
#
|
||||
# # Example 4: Standalone (no group - appears at top level)
|
||||
# - name: "general-assistant"
|
||||
# label: "General Assistant"
|
||||
# description: "General purpose assistant"
|
||||
# # No 'group' field - appears as standalone item at top level (not nested)
|
||||
# preset:
|
||||
# endpoint: "openAI"
|
||||
# model: "gpt-4o-mini"
|
||||
|
||||
fileConfig:
|
||||
endpoints:
|
||||
assistants:
|
||||
fileLimit: 5
|
||||
fileSizeLimit: 10 # Maximum size for an individual file in MB
|
||||
totalSizeLimit: 50 # Maximum total size for all files in a single request in MB
|
||||
supportedMimeTypes:
|
||||
- "image/.*"
|
||||
- "application/pdf"
|
||||
openAI:
|
||||
disabled: true # Disables file uploading to the OpenAI endpoint
|
||||
default:
|
||||
totalSizeLimit: 100
|
||||
fileSizeLimit: 100
|
||||
fileLimit: 10
|
||||
# bifrost:
|
||||
# fileLimit: 25
|
||||
# fileSizeLimit: 50
|
||||
serverFileSizeLimit: 1000 # Global server file size limit in MB
|
||||
fileTokenLimit: 100000
|
||||
avatarSizeLimit: 2 # Limit for user avatar image size in MB
|
||||
imageGeneration: # Image Gen settings, either percentage or px
|
||||
percentage: 100
|
||||
px: 1024
|
||||
ocr:
|
||||
supportedMimeTypes:
|
||||
- "^image/(jpeg|gif|png|webp|heic|heif)$"
|
||||
- "^application/pdf$"
|
||||
- "^application/vnd\\.openxmlformats-officedocument\\.(wordprocessingml\\.document|presentationml\\.presentation|spreadsheetml\\.sheet)$"
|
||||
- "^application/vnd\\.ms-(word|powerpoint|excel)$"
|
||||
- "^application/epub\\+zip$"
|
||||
text:
|
||||
supportedMimeTypes:
|
||||
- "^text/(plain|markdown|csv|json|xml|html|css|javascript|typescript|x-python|x-java|x-csharp|x-php|x-ruby|x-go|x-rust|x-kotlin|x-swift|x-scala|x-perl|x-lua|x-shell|x-sql|x-yaml|x-toml)$"
|
||||
stt:
|
||||
supportedMimeTypes:
|
||||
- "^audio/(mp3|mpeg|mpeg3|wav|wave|x-wav|ogg|vorbis|mp4|x-m4a|flac|x-flac|webm)$"
|
||||
# Client-side image resizing to prevent upload errors
|
||||
clientImageResize:
|
||||
enabled: true # Enable/disable client-side image resizing (default: false)
|
||||
maxWidth: 1900 # Maximum width for resized images (default: 1900)
|
||||
maxHeight: 1900 # Maximum height for resized images (default: 1900)
|
||||
quality: 0.92 # JPEG quality for compression (0.0-1.0, default: 0.92)
|
||||
# See the Custom Configuration Guide for more information on Assistants Config:
|
||||
# https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint
|
||||
|
||||
# Web Search Configuration (optional)
|
||||
webSearch:
|
||||
rerankerType: 'cohere'
|
||||
# Jina Reranking Configuration
|
||||
# jinaApiKey: '${JINA_API_KEY}' # Your Jina API key
|
||||
# jinaApiUrl: '${JINA_API_URL}' # Custom Jina API URL (optional, defaults to https://api.jina.ai/v1/rerank)
|
||||
# Other rerankers
|
||||
cohereApiKey: '${COHERE_API_KEY}'
|
||||
# Search providers
|
||||
searchProvider: "searxng"
|
||||
# serperApiKey: '${SERPER_API_KEY}'
|
||||
searxngInstanceUrl: '${SEARXNG_INSTANCE_URL}'
|
||||
# searxngApiKey: '${SEARXNG_API_KEY}'
|
||||
# Content scrapers
|
||||
scraperProvider: "firecrawl"
|
||||
firecrawlApiKey: '${FIRECRAWL_API_KEY}'
|
||||
firecrawlApiUrl: '${FIRECRAWL_API_URL}'
|
||||
firecrawlVersion: "${FIRECRAWL_VERSION}"
|
||||
# Memory configuration for user memories
|
||||
memory:
|
||||
# (optional) Disable memory functionality
|
||||
disabled: false
|
||||
# (optional) Restrict memory keys to specific values to limit memory storage and improve consistency
|
||||
validKeys: ["preferences", "work_info", "personal_info", "skills", "interests", "context"]
|
||||
# (optional) Maximum token limit for memory storage (not yet implemented for token counting)
|
||||
tokenLimit: 10000
|
||||
# (optional) Enable personalization features (defaults to true if memory is configured)
|
||||
# When false, users will not see the Personalization tab in settings
|
||||
personalize: true
|
||||
# Memory agent configuration - either use an existing agent by ID or define inline
|
||||
agent:
|
||||
# Option 1: Use existing agent by ID
|
||||
# id: "your-memory-agent-id"
|
||||
# Option 2: Define agent inline
|
||||
provider: "litellm"
|
||||
model: "fireworks_ai/qwen3-vl-30b-a3b-instruct"
|
||||
instructions: "You are a memory management assistant. Store and manage user information accurately and do not embellish the information."
|
||||
# model_parameters:
|
||||
# temperature: 0.1
|
||||
stack.env: new file, 797 lines
@@ -0,0 +1,797 @@
|
||||
#=====================================================================#
|
||||
# LibreChat Configuration #
|
||||
#=====================================================================#
|
||||
# Please refer to the reference documentation for assistance #
|
||||
# with configuring your LibreChat environment. #
|
||||
# #
|
||||
# https://www.librechat.ai/docs/configuration/dotenv #
|
||||
#=====================================================================#
|
||||
|
||||
#==================================================#
|
||||
# Server Configuration #
|
||||
#==================================================#
|
||||
|
||||
HOST=localhost
|
||||
PORT=3080
|
||||
|
||||
MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
|
||||
#The maximum number of connections in the connection pool. */
|
||||
MONGO_MAX_POOL_SIZE=
|
||||
#The minimum number of connections in the connection pool. */
|
||||
MONGO_MIN_POOL_SIZE=
|
||||
#The maximum number of connections that may be in the process of being established concurrently by the connection pool. */
|
||||
MONGO_MAX_CONNECTING=
|
||||
#The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed. */
|
||||
MONGO_MAX_IDLE_TIME_MS=
|
||||
#The maximum time in milliseconds that a thread can wait for a connection to become available. */
|
||||
MONGO_WAIT_QUEUE_TIMEOUT_MS=
|
||||
# Set to false to disable automatic index creation for all models associated with this connection. */
|
||||
MONGO_AUTO_INDEX=
|
||||
# Set to `false` to disable Mongoose automatically calling `createCollection()` on every model created on this connection. */
|
||||
MONGO_AUTO_CREATE=
|
||||
|
||||
DOMAIN_CLIENT=https://chat.baked.rocks
|
||||
DOMAIN_SERVER=https://chat.baked.rocks
|
||||
|
||||
NO_INDEX=true
|
||||
# Use the address that is at most n number of hops away from the Express application.
|
||||
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
|
||||
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
|
||||
# Defaulted to 1.
|
||||
TRUST_PROXY=1
|
||||
|
||||
# Minimum password length for user authentication
|
||||
# Default: 8
|
||||
# Note: When using LDAP authentication, you may want to set this to 1
|
||||
# to bypass local password validation, as LDAP servers handle their own
|
||||
# password policies.
|
||||
# MIN_PASSWORD_LENGTH=8
|
||||
|
||||
#===============#
|
||||
# JSON Logging #
|
||||
#===============#
|
||||
|
||||
# Use when process console logs in cloud deployment like GCP/AWS
|
||||
CONSOLE_JSON=false
|
||||
|
||||
#===============#
|
||||
# Debug Logging #
|
||||
#===============#
|
||||
|
||||
DEBUG_LOGGING=true
|
||||
DEBUG_CONSOLE=false
|
||||
|
||||
#=============#
|
||||
# Permissions #
|
||||
#=============#
|
||||
|
||||
# UID=1000
|
||||
# GID=1000
|
||||
|
||||
#===============#
|
||||
# Configuration #
|
||||
#===============#
|
||||
# Use an absolute path, a relative path, or a URL
|
||||
# Note: Using mounted file from deploy-compose.swarm.yml instead of remote URL
|
||||
|
||||
CONFIG_PATH=/app/librechat.yaml
|
||||
|
||||
#===================================================#
|
||||
# Endpoints #
|
||||
#===================================================#
|
||||
|
||||
ENDPOINTS=openAI,assistants,google,anthropic,custom,fireworks,openrouter,deepseek,perplexity,groq,cohere,mistral,agents
|
||||
|
||||
PROXY=
|
||||
|
||||
#===================================#
|
||||
# Known Endpoints - librechat.yaml #
|
||||
#===================================#
|
||||
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints
|
||||
|
||||
# ANYSCALE_API_KEY=
|
||||
# APIPIE_API_KEY=
|
||||
# COHERE_API_KEY=user_provided # Duplicate removed - see line 779 for actual value
|
||||
DEEPSEEK_API_KEY=user_provided
|
||||
# DATABRICKS_API_KEY=
|
||||
FIREWORKS_API_KEY=user_provided
|
||||
GROQ_API_KEY=user_provided
|
||||
# HUGGINGFACE_TOKEN=
|
||||
MISTRAL_API_KEY=user_provided
|
||||
OPENROUTER_KEY=user_provided
|
||||
PERPLEXITY_API_KEY=user_provided
|
||||
# SHUTTLEAI_API_KEY=
|
||||
# TOGETHERAI_API_KEY=
|
||||
# UNIFY_API_KEY=
|
||||
# XAI_API_KEY=
|
||||
|
||||
#============#
|
||||
# Anthropic #
|
||||
#============#
|
||||
|
||||
ANTHROPIC_API_KEY=user_provided
|
||||
# ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
|
||||
# ANTHROPIC_REVERSE_PROXY=
|
||||
|
||||
#============#
|
||||
# Azure #
|
||||
#============#
|
||||
|
||||
# Note: these variables are DEPRECATED
|
||||
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
|
||||
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
|
||||
|
||||
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
|
||||
# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
|
||||
# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
|
||||
# AZURE_API_KEY= # Deprecated
|
||||
# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
|
||||
# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
|
||||
# AZURE_OPENAI_API_VERSION= # Deprecated
|
||||
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
|
||||
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
|
||||
# PLUGINS_USE_AZURE="true" # Deprecated
|
||||
|
||||
#=================#
|
||||
# AWS Bedrock #
|
||||
#=================#
|
||||
|
||||
# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
|
||||
# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
|
||||
# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
|
||||
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
|
||||
|
||||
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
|
||||
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
|
||||
|
||||
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
|
||||
|
||||
# Notes on specific models:
|
||||
# The following models are not supported due to not supporting streaming:
|
||||
# ai21.j2-mid-v1
|
||||
|
||||
# The following models are not supported due to not supporting conversation history:
|
||||
# ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14
|
||||
|
||||
#============#
|
||||
# Google #
|
||||
#============#
|
||||
|
||||
GOOGLE_KEY=user_provided
|
||||
|
||||
# GOOGLE_REVERSE_PROXY=
|
||||
# Some reverse proxies do not support the X-goog-api-key header, uncomment to pass the API key in Authorization header instead.
|
||||
# GOOGLE_AUTH_HEADER=true
|
||||
|
||||
# Gemini API (AI Studio)
|
||||
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
|
||||
|
||||
# Vertex AI
|
||||
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
|
||||
|
||||
# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
|
||||
|
||||
# GOOGLE_LOC=us-central1
|
||||
|
||||
# Google Safety Settings
|
||||
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
|
||||
#
|
||||
# For Vertex AI:
|
||||
# To use the BLOCK_NONE setting, you need either:
|
||||
# (a) Access through an allowlist via your Google account team, or
|
||||
# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing
|
||||
#
|
||||
# For Gemini API (AI Studio):
|
||||
# BLOCK_NONE is available by default, no special account requirements.
|
||||
#
|
||||
# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
|
||||
#
|
||||
# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
|
||||
|
||||
#============#
|
||||
# OpenAI #
|
||||
#============#
|
||||
|
||||
OPENAI_API_KEY=user_provided
|
||||
# OPENAI_MODELS=gpt-5,gpt-5-codex,gpt-5-mini,gpt-5-nano,o3-pro,o3,o4-mini,gpt-4.1,gpt-4.1-mini,gpt-4.1-nano,o3-mini,o1-pro,o1,gpt-4o,gpt-4o-mini
|
||||
|
||||
DEBUG_OPENAI=false
|
||||
|
||||
# TITLE_CONVO=false
|
||||
# OPENAI_TITLE_MODEL=gpt-4o-mini
|
||||
|
||||
# OPENAI_SUMMARIZE=true
|
||||
# OPENAI_SUMMARY_MODEL=gpt-4o-mini
|
||||
|
||||
# OPENAI_FORCE_PROMPT=true
|
||||
|
||||
# OPENAI_REVERSE_PROXY=
|
||||
|
||||
# OPENAI_ORGANIZATION=
|
||||
|
||||
#====================#
|
||||
# Assistants API #
|
||||
#====================#
|
||||
|
||||
ASSISTANTS_API_KEY=user_provided
|
||||
# ASSISTANTS_BASE_URL=
|
||||
# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
|
||||
|
||||
#==========================#
|
||||
# Azure Assistants API #
|
||||
#==========================#
|
||||
|
||||
# Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration
|
||||
# The models for Azure Assistants are also determined by your Azure OpenAI configuration.
|
||||
|
||||
# More info, including how to enable use of Assistants with Azure here:
|
||||
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
|
||||
|
||||
#============#
|
||||
# Plugins #
|
||||
#============#
|
||||
|
||||
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
|
||||
|
||||
DEBUG_PLUGINS=true
|
||||
|
||||
CREDS_KEY=59381de7f30d104c84654a5553bceb358b3fa030b7db15a0d5ebf4d752a64eb2
|
||||
CREDS_IV=4d446a524e0710eff791135f510c17d5
|
||||
|
||||
# Azure AI Search
|
||||
#-----------------
|
||||
AZURE_AI_SEARCH_SERVICE_ENDPOINT=
|
||||
AZURE_AI_SEARCH_INDEX_NAME=
|
||||
AZURE_AI_SEARCH_API_KEY=
|
||||
|
||||
AZURE_AI_SEARCH_API_VERSION=
|
||||
AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE=
|
||||
AZURE_AI_SEARCH_SEARCH_OPTION_TOP=
|
||||
AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
|
||||
|
||||
# OpenAI Image Tools Customization
|
||||
#----------------
|
||||
IMAGE_GEN_OAI_API_KEY=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 # Create or reuse OpenAI API key for image generation tool
|
||||
# IMAGE_GEN_OAI_BASEURL= # Custom OpenAI base URL for image generation tool
|
||||
# IMAGE_GEN_OAI_AZURE_API_VERSION= # Custom Azure OpenAI deployments
|
||||
# IMAGE_GEN_OAI_DESCRIPTION=
|
||||
# IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present
|
||||
# IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present
|
||||
# IMAGE_EDIT_OAI_DESCRIPTION=Custom description for image editing tool
|
||||
# IMAGE_GEN_OAI_PROMPT_DESCRIPTION=Custom prompt description for image generation tool
|
||||
# IMAGE_EDIT_OAI_PROMPT_DESCRIPTION=Custom prompt description for image editing tool
|
||||
|
||||
# DALL·E
|
||||
#----------------
|
||||
# DALLE_API_KEY=
|
||||
# DALLE3_API_KEY=
|
||||
# DALLE2_API_KEY=
|
||||
# DALLE3_SYSTEM_PROMPT=
|
||||
# DALLE2_SYSTEM_PROMPT=
|
||||
# DALLE_REVERSE_PROXY=
|
||||
# DALLE3_BASEURL=
|
||||
# DALLE2_BASEURL=
|
||||
|
||||
# DALL·E (via Azure OpenAI)
|
||||
# Note: requires some of the variables above to be set
|
||||
#----------------
|
||||
# DALLE3_AZURE_API_VERSION=
|
||||
# DALLE2_AZURE_API_VERSION=
|
||||
|
||||
# Flux
|
||||
#-----------------
|
||||
FLUX_API_BASE_URL=https://api.us1.bfl.ai
|
||||
# FLUX_API_BASE_URL = 'https://api.bfl.ml';
|
||||
|
||||
# Get your API key at https://api.us1.bfl.ai/auth/profile
|
||||
# FLUX_API_KEY=
|
||||
|
||||
# Google
|
||||
#-----------------
|
||||
GOOGLE_SEARCH_API_KEY=
|
||||
GOOGLE_CSE_ID=
|
||||
|
||||
# YOUTUBE
|
||||
#-----------------
|
||||
YOUTUBE_API_KEY=
|
||||
|
||||
# Stable Diffusion
|
||||
#-----------------
|
||||
SD_WEBUI_URL=http://host.docker.internal:7860
|
||||
|
||||
# Tavily
|
||||
#-----------------
|
||||
TAVILY_API_KEY=
|
||||
|
||||
# Traversaal
|
||||
#-----------------
|
||||
TRAVERSAAL_API_KEY=
|
||||
|
||||
# WolframAlpha
|
||||
#-----------------
|
||||
WOLFRAM_APP_ID=
|
||||
|
||||
# Zapier
|
||||
#-----------------
|
||||
ZAPIER_NLA_API_KEY=
|
||||
|
||||
#==================================================#
|
||||
# Search #
|
||||
#==================================================#
|
||||
|
||||
SEARCH=true
|
||||
MEILI_NO_ANALYTICS=true
|
||||
MEILI_HOST=http://0.0.0.0:7700
|
||||
MEILI_MASTER_KEY=5e3490d4b8da39d3195132ccf9a77f71
|
||||
|
||||
# Optional: Disable indexing, useful in a multi-node setup
|
||||
# where only one instance should perform an index sync.
|
||||
# MEILI_NO_SYNC=true
|
||||
|
||||
#==================================================#
|
||||
# Speech to Text & Text to Speech #
|
||||
#==================================================#
|
||||
|
||||
STT_API_KEY=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
|
||||
TTS_API_KEY=sk_9a8e3153ba1e3133a4d87cd0dafa723e24d997ab3a0b8870
|
||||
|
||||
#==================================================#
|
||||
# RAG #
|
||||
#==================================================#
|
||||
# More info: https://www.librechat.ai/docs/configuration/rag_api
|
||||
RAG_API_URL=http://host.docker.internal:8000
|
||||
# RAG_OPENAI_BASEURL=
|
||||
RAG_OPENAI_API_KEY=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
|
||||
# RAG_USE_FULL_CONTEXT=
|
||||
EMBEDDINGS_PROVIDER=openai
|
||||
EMBEDDINGS_MODEL=text-embedding-3-small
|
||||
|
||||
#===================================================#
|
||||
# User System #
|
||||
#===================================================#
|
||||
|
||||
#========================#
|
||||
# Moderation #
|
||||
#========================#
|
||||
|
||||
OPENAI_MODERATION=false
|
||||
OPENAI_MODERATION_API_KEY=
|
||||
# OPENAI_MODERATION_REVERSE_PROXY=
|
||||
|
||||
BAN_VIOLATIONS=true
|
||||
BAN_DURATION=1000 * 60 * 60 * 2
|
||||
BAN_INTERVAL=20
|
||||
|
||||
LOGIN_VIOLATION_SCORE=1
|
||||
REGISTRATION_VIOLATION_SCORE=1
|
||||
CONCURRENT_VIOLATION_SCORE=1
|
||||
MESSAGE_VIOLATION_SCORE=1
|
||||
NON_BROWSER_VIOLATION_SCORE=20
|
||||
TTS_VIOLATION_SCORE=0
|
||||
STT_VIOLATION_SCORE=0
|
||||
FORK_VIOLATION_SCORE=0
|
||||
IMPORT_VIOLATION_SCORE=0
|
||||
FILE_UPLOAD_VIOLATION_SCORE=0
|
||||
|
||||
LOGIN_MAX=20
|
||||
LOGIN_WINDOW=5
|
||||
REGISTER_MAX=20
|
||||
REGISTER_WINDOW=60
|
||||
|
||||
LIMIT_CONCURRENT_MESSAGES=true
|
||||
CONCURRENT_MESSAGE_MAX=3
|
||||
|
||||
LIMIT_MESSAGE_IP=true
|
||||
MESSAGE_IP_MAX=40
|
||||
MESSAGE_IP_WINDOW=1
|
||||
|
||||
LIMIT_MESSAGE_USER=false
|
||||
MESSAGE_USER_MAX=40
|
||||
MESSAGE_USER_WINDOW=1
|
||||
|
||||
ILLEGAL_MODEL_REQ_SCORE=5
|
||||
|
||||
#========================#
|
||||
# Balance #
|
||||
#========================#
|
||||
|
||||
# CHECK_BALANCE=false
|
||||
# START_BALANCE=20000 # note: the number of tokens that will be credited after registration.
|
||||
|
||||
#========================#
|
||||
# Registration and Login #
|
||||
#========================#
|
||||
|
||||
ALLOW_EMAIL_LOGIN=true
|
||||
ALLOW_REGISTRATION=true
|
||||
ALLOW_SOCIAL_LOGIN=true
|
||||
ALLOW_SOCIAL_REGISTRATION=true
|
||||
ALLOW_PASSWORD_RESET=false
|
||||
# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out
|
||||
ALLOW_UNVERIFIED_EMAIL_LOGIN=true
|
||||
|
||||
SESSION_EXPIRY=1000 * 60 * 15
|
||||
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
|
||||
|
||||
JWT_SECRET=4a7accae9dd0409750bc9e0b1c24a3ad7d19abad0e138a6d93d29dd3f82357b2
|
||||
JWT_REFRESH_SECRET=a49727968071ad906c812ae7015453c8142168b1afd6694205e7da8aca614eb0
|
||||
|
||||
# Discord
|
||||
DISCORD_CLIENT_ID=
|
||||
DISCORD_CLIENT_SECRET=
|
||||
DISCORD_CALLBACK_URL=/oauth/discord/callback
|
||||
|
||||
# Facebook
|
||||
FACEBOOK_CLIENT_ID=
|
||||
FACEBOOK_CLIENT_SECRET=
|
||||
FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
|
||||
|
||||
# GitHub
|
||||
GITHUB_CLIENT_ID=
|
||||
GITHUB_CLIENT_SECRET=
|
||||
GITHUB_CALLBACK_URL=/oauth/github/callback
|
||||
# GitHub Enterprise
|
||||
# GITHUB_ENTERPRISE_BASE_URL=
|
||||
# GITHUB_ENTERPRISE_USER_AGENT=
|
||||
|
||||
# Google
|
||||
GOOGLE_CLIENT_ID=
|
||||
GOOGLE_CLIENT_SECRET=
|
||||
GOOGLE_CALLBACK_URL=/oauth/google/callback
|
||||
|
||||
# Apple
|
||||
APPLE_CLIENT_ID=
|
||||
APPLE_TEAM_ID=
|
||||
APPLE_KEY_ID=
|
||||
APPLE_PRIVATE_KEY_PATH=
|
||||
APPLE_CALLBACK_URL=/oauth/apple/callback
|
||||
|
||||
# OpenID
|
||||
OPENID_CLIENT_ID=WAKWgswi861g5ffBXaUTvFwSKr0PbtjiAadXthOR
|
||||
OPENID_CLIENT_SECRET=fF1p5Le8bcyyag0Itwn91ZydlxwSnqCN2de1pudASxIA4c8phsYGztXdGUjCQes9TGS20YfkmhaP6OabsZY4CptsFGj47RhgjgfowyPljsblrOyJ6yQv8MQsk7p24qpg
|
||||
OPENID_ISSUER=https://auth.baked.rocks/application/o/librechat/.well-known/openid-configuration
|
||||
OPENID_SESSION_SECRET=5685643423f66ee9ad0c743b45c0caaee6c3377463a12c74dc9da2cb1cb19d0f
|
||||
OPENID_SCOPE=openid profile email
|
||||
OPENID_CALLBACK_URL=/oauth/openid/callback
|
||||
OPENID_REQUIRED_ROLE=
|
||||
OPENID_REQUIRED_ROLE_TOKEN_KIND=
|
||||
OPENID_REQUIRED_ROLE_PARAMETER_PATH=
|
||||
OPENID_ADMIN_ROLE=
|
||||
OPENID_ADMIN_ROLE_PARAMETER_PATH=
|
||||
OPENID_ADMIN_ROLE_TOKEN_KIND=
|
||||
# Set to determine which user info property returned from OpenID Provider to store as the User's username
|
||||
OPENID_USERNAME_CLAIM=email
|
||||
# Set to determine which user info property returned from OpenID Provider to store as the User's name
|
||||
OPENID_NAME_CLAIM=name
|
||||
# Optional audience parameter for OpenID authorization requests
|
||||
OPENID_AUDIENCE=
|
||||
OPENID_GENERATE_NONCE=true
|
||||
OPENID_USE_END_SESSION_ENDPOINT=true
|
||||
OPENID_BUTTON_LABEL=Login with Magick
|
||||
OPENID_IMAGE_URL=https://cdn.jsdelivr.net/gh/selfhst/icons/png/authentik.png
|
||||
# Set to true to automatically redirect to the OpenID provider when a user visits the login page
|
||||
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
|
||||
OPENID_AUTO_REDIRECT=false
|
||||
# Set to true to use PKCE (Proof Key for Code Exchange) for OpenID authentication
|
||||
OPENID_USE_PKCE=false
|
||||
#Set to true to reuse openid tokens for authentication management instead of using the mongodb session and the custom refresh token.
|
||||
OPENID_REUSE_TOKENS=
|
||||
#By default, signing key verification results are cached in order to prevent excessive HTTP requests to the JWKS endpoint.
|
||||
#If a signing key matching the kid is found, this will be cached and the next time this kid is requested the signing key will be served from the cache.
|
||||
#Default is true.
|
||||
OPENID_JWKS_URL_CACHE_ENABLED=true
|
||||
# 600000 ms eq to 10 minutes leave empty to disable caching
|
||||
OPENID_JWKS_URL_CACHE_TIME=6000000
|
||||
#Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
|
||||
OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED=
|
||||
# example for Scope Needed for Microsoft Graph API
|
||||
OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE=user.read
|
||||
# Set to true to use the OpenID Connect end session endpoint for logout
|
||||
OPENID_USE_END_SESSION_ENDPOINT=true
|
||||
|
||||
#========================#
|
||||
# SharePoint Integration #
|
||||
#========================#
|
||||
# Requires Entra ID (OpenID) authentication to be configured
|
||||
|
||||
# Enable SharePoint file picker in chat and agent panels
|
||||
# ENABLE_SHAREPOINT_FILEPICKER=true
|
||||
|
||||
# SharePoint tenant base URL (e.g., https://yourtenant.sharepoint.com)
|
||||
# SHAREPOINT_BASE_URL=https://yourtenant.sharepoint.com
|
||||
|
||||
# Microsoft Graph API And SharePoint scopes for file picker
|
||||
# SHAREPOINT_PICKER_SHAREPOINT_SCOPE=https://yourtenant.sharepoint.com/AllSites.Read
|
||||
# SHAREPOINT_PICKER_GRAPH_SCOPE=Files.Read.All
|
||||
#========================#
|
||||
|
||||
# SAML
|
||||
# Note: If OpenID is enabled, SAML authentication will be automatically disabled.
|
||||
SAML_ENTRY_POINT=
|
||||
SAML_ISSUER=
|
||||
SAML_CERT=
|
||||
SAML_CALLBACK_URL=/oauth/saml/callback
|
||||
SAML_SESSION_SECRET=
|
||||
|
||||
# Attribute mappings (optional)
|
||||
SAML_EMAIL_CLAIM=
|
||||
SAML_USERNAME_CLAIM=
|
||||
SAML_GIVEN_NAME_CLAIM=
|
||||
SAML_FAMILY_NAME_CLAIM=
|
||||
SAML_PICTURE_CLAIM=
|
||||
SAML_NAME_CLAIM=
|
||||
|
||||
# Login button settings (optional)
|
||||
SAML_BUTTON_LABEL=
|
||||
SAML_IMAGE_URL=
|
||||
|
||||
# Whether the SAML Response should be signed.
|
||||
# - If "true", the entire `SAML Response` will be signed.
|
||||
# - If "false" or unset, only the `SAML Assertion` will be signed (default behavior).
|
||||
# SAML_USE_AUTHN_RESPONSE_SIGNED=
|
||||
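# Illustrative sketch only: the IdP URL, issuer, and certificate path below are hypothetical,
# not values from this deployment:
# SAML_ENTRY_POINT=https://idp.example.com/sso/saml
# SAML_ISSUER=librechat
# SAML_CERT=/path/to/idp-signing-cert.pem
# SAML_SESSION_SECRET=<generate a long random string>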


#===============================================#
# Microsoft Graph API / Entra ID Integration #
#===============================================#

# Enable Entra ID people search integration in permissions/sharing system
# When enabled, the people picker will search both the local database and Entra ID
USE_ENTRA_ID_FOR_PEOPLE_SEARCH=false

# When enabled, Entra ID group owners will be considered members of the group
ENTRA_ID_INCLUDE_OWNERS_AS_MEMBERS=false

# Microsoft Graph API scopes needed for people/group search
# Default scopes provide access to user profiles and group memberships
OPENID_GRAPH_SCOPES=User.Read,People.Read,GroupMember.Read.All

# LDAP
LDAP_URL=
LDAP_BIND_DN=
LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
#LDAP_SEARCH_FILTER="mail="
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
# LDAP_STARTTLS=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
# LDAP_EMAIL=
# LDAP_FULL_NAME=
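# Illustrative sketch only: the directory host, DNs, and search filter below are hypothetical,
# and the "{{username}}" placeholder syntax is an assumption, not confirmed by this file:
# LDAP_URL=ldaps://ldap.example.com:636
# LDAP_BIND_DN=cn=librechat,ou=service,dc=example,dc=com
# LDAP_BIND_CREDENTIALS=change-me
# LDAP_USER_SEARCH_BASE=ou=users,dc=example,dc=com
# LDAP_SEARCH_FILTER="mail={{username}}"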

#========================#
# Email Password Reset #
#========================#

EMAIL_SERVICE=
EMAIL_HOST=
EMAIL_PORT=25
EMAIL_ENCRYPTION=
EMAIL_ENCRYPTION_HOSTNAME=
EMAIL_ALLOW_SELFSIGNED=
EMAIL_USERNAME=
EMAIL_PASSWORD=
EMAIL_FROM_NAME=
EMAIL_FROM=noreply@librechat.ai
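# Illustrative sketch only: hypothetical SMTP relay values, not from this deployment,
# and the "starttls" encryption value is an assumption:
# EMAIL_HOST=smtp.example.com
# EMAIL_PORT=587
# EMAIL_ENCRYPTION=starttls
# EMAIL_USERNAME=librechat@example.com
# EMAIL_PASSWORD=change-me
# EMAIL_FROM_NAME=LibreChat
# EMAIL_FROM=noreply@example.com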

#========================#
# Mailgun API #
#========================#

MAILGUN_API_KEY=5092859788e1304d7b28901fe41bd6a7-51afd2db-b4b55a79
MAILGUN_DOMAIN=thingswithstuff.io
EMAIL_FROM=tinkerer@thingswithstuff.io
EMAIL_FROM_NAME="LibreChat"

# Optional: For EU region
# MAILGUN_HOST=https://api.eu.mailgun.net

#========================#
# Firebase CDN #
#========================#

FIREBASE_API_KEY=
FIREBASE_AUTH_DOMAIN=
FIREBASE_PROJECT_ID=
FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=

#========================#
# S3 AWS Bucket #
#========================#

AWS_ENDPOINT_URL=https://io.baked.rocks
AWS_ACCESS_KEY_ID=root
AWS_SECRET_ACCESS_KEY=squirtle123
AWS_REGION=us-east-1
AWS_BUCKET_NAME=librechat

#========================#
# Azure Blob Storage #
#========================#

AZURE_STORAGE_CONNECTION_STRING=
AZURE_STORAGE_PUBLIC_ACCESS=false
AZURE_CONTAINER_NAME=files

#========================#
# Shared Links #
#========================#

ALLOW_SHARED_LINKS=true
ALLOW_SHARED_LINKS_PUBLIC=true

#==============================#
# Static File Cache Control #
#==============================#

# Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age
# NODE_ENV must be set to production for these to take effect
# STATIC_CACHE_MAX_AGE=172800
# STATIC_CACHE_S_MAX_AGE=86400
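# Illustrative sketch only: overriding the defaults to cache static assets for 7 days in
# browsers (7 x 86400 = 604800 seconds) while keeping shared caches at 1 day:
# STATIC_CACHE_MAX_AGE=604800
# STATIC_CACHE_S_MAX_AGE=86400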

# If you have another service in front of LibreChat doing compression, disable Express-based compression here
# DISABLE_COMPRESSION=true

# If you have gzipped versions of uploaded images in the same folder, this enables scanning for and serving those gzipped images
# Note: The images folder will be scanned on startup and a map kept in memory. Be careful with a large number of images.
# ENABLE_IMAGE_OUTPUT_GZIP_SCAN=true

#===================================================#
# UI #
#===================================================#

APP_TITLE=LibreChat
# CUSTOM_FOOTER="My custom footer"
HELP_AND_FAQ_URL=https://librechat.ai

# SHOW_BIRTHDAY_ICON=true

# Google Tag Manager ID
#ANALYTICS_GTM_ID=user provided google tag manager id

# Limit conversation file imports to a maximum number of bytes, to keep the container from
# maxing out its memory limits, by uncommenting this line and supplying a file size in bytes,
# such as the example below of 250 MiB (250 * 1024 * 1024 = 262144000)
# CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES=262144000


#===============#
# REDIS Options #
#===============#

# Enable Redis for caching and session storage
USE_REDIS=false

# Single Redis instance
REDIS_URI=redis://192.168.50.210:6379/3

# Redis cluster (multiple nodes)
# REDIS_URI=redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003

# Redis with TLS/SSL encryption and CA certificate
# REDIS_URI=rediss://127.0.0.1:6380
# REDIS_CA=/path/to/ca-cert.pem

# ElastiCache may need to use an alternate dnsLookup for TLS connections. See "Special Note: Aws Elasticache Clusters with TLS" at https://www.npmjs.com/package/ioredis
# Enable the alternative dnsLookup for Redis
# REDIS_USE_ALTERNATIVE_DNS_LOOKUP=true

# Redis authentication (if required)
# REDIS_USERNAME=your_redis_username
# REDIS_PASSWORD=your_redis_password
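# Illustrative sketch only: standard Redis URI syntax also allows credentials inline
# (hypothetical username/password, not values from this deployment):
# REDIS_URI=redis://your_redis_username:your_redis_password@192.168.50.210:6379/3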

# Redis key prefix configuration
# Use environment variable name for dynamic prefix (recommended for cloud deployments)
# REDIS_KEY_PREFIX_VAR=K_REVISION
# Or use static prefix directly
# REDIS_KEY_PREFIX=librechat

# Redis connection limits
# REDIS_MAX_LISTENERS=40

# Redis ping interval in seconds (0 = disabled, >0 = enabled)
# When set to a positive integer, Redis clients will ping the server at this interval to keep connections alive
# When unset or 0, no pinging is performed (recommended for most use cases)
# REDIS_PING_INTERVAL=300

# Force specific cache namespaces to use in-memory storage even when Redis is enabled
# Comma-separated list of CacheKeys (e.g., ROLES,MESSAGES)
FORCED_IN_MEMORY_CACHE_NAMESPACES=APP_CONFIG,CONFIG_STORE

# Leader Election Configuration (for multi-instance deployments with Redis)
# Duration in seconds that the leader lease is valid before it expires (default: 25)
# LEADER_LEASE_DURATION=25
# Interval in seconds at which the leader renews its lease (default: 10)
# LEADER_RENEW_INTERVAL=10
# Maximum number of retry attempts when renewing the lease fails (default: 3)
# LEADER_RENEW_ATTEMPTS=3
# Delay in seconds between retry attempts when renewing the lease (default: 0.5)
# LEADER_RENEW_RETRY_DELAY=0.5
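# Worked note (general guidance, not from the upstream docs): keep LEADER_RENEW_INTERVAL well
# below LEADER_LEASE_DURATION. With the defaults, the leader renews every 10 s against a 25 s
# lease, so two consecutive renewals (at 10 s and 20 s) can fail before the lease expires at
# 25 s and another instance takes over leadership.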

#==================================================#
# Others #
#==================================================#
# You should leave the following commented out #

# NODE_ENV=

# E2E_USER_EMAIL=
# E2E_USER_PASSWORD=

#=====================================================#
# Cache Headers #
#=====================================================#
# Headers that control caching of the index.html #
# Default configuration prevents caching to ensure #
# users always get the latest version. Customize #
# only if you understand caching implications. #

# INDEX_CACHE_CONTROL=no-cache, no-store, must-revalidate
# INDEX_PRAGMA=no-cache
# INDEX_EXPIRES=0
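# For reference, the commented defaults above are equivalent to serving index.html with:
#   Cache-Control: no-cache, no-store, must-revalidate
#   Pragma: no-cache
#   Expires: 0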

# no-cache: Forces validation with server before using cached version
# no-store: Prevents storing the response entirely
# must-revalidate: Prevents using stale content when offline

#=====================================================#
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=abac030ace40794a6b3afcd59faf676e

#====================================#
# LibreChat Code Interpreter API #
#====================================#

# https://code.librechat.ai
# LIBRECHAT_CODE_API_KEY=your-key

#======================#
# Web Search #
#======================#

# Note: All of the following variable names can be customized.
# Omit values to allow users to provide them.

# For more information on configuration values, see:
# https://librechat.ai/docs/features/web_search

# Search Provider (Required)
# SERPER_API_KEY=your_serper_api_key
SEARXNG_INSTANCE_URL=https://searxng.sidepiece.rip
SEARXNG_API_KEY=squirtle123456

# Scraper (Required)
FIRECRAWL_API_KEY=dummy-key
# Optional: Custom Firecrawl API URL
FIRECRAWL_API_URL=http://crawl.lab
FIRECRAWL_VERSION=v2
# Reranker (Required)
# JINA_API_KEY=your_jina_api_key
# or
COHERE_API_KEY=Zx9TS3woEdUrFNpJv7ysM7yN3Bm85Wpq0KTdCUKp

#======================#
# MCP Configuration #
#======================#

# Treat 401/403 responses as an OAuth requirement when no OAuth metadata is found
# MCP_OAUTH_ON_AUTH_ERROR=true

# Timeout for OAuth detection requests in milliseconds
# MCP_OAUTH_DETECTION_TIMEOUT=5000

# Cache connection status checks for this many milliseconds to avoid expensive verification
# MCP_CONNECTION_CHECK_TTL=60000

# Skip code challenge method validation (e.g., for AWS Cognito, which supports S256 but doesn't advertise it)
# When set to true, forces the S256 code challenge even if it is not advertised in .well-known/openid-configuration
# MCP_SKIP_CODE_CHALLENGE_CHECK=false