git
Some checks failed
Sync Locize Translations & Create Translation PR / Sync Translation Keys with Locize (push) Has been cancelled
Sync Locize Translations & Create Translation PR / Create Translation PR on Version Published (push) Has been cancelled

This commit is contained in:
2025-12-05 00:06:55 +00:00
parent f55bd6f99b
commit cb51b7e0ab
3 changed files with 1511 additions and 0 deletions

219
deploy-compose.swarm.yml Normal file
View File

@@ -0,0 +1,219 @@
version: "3.8"
services:
api:
# build:
# context: .
# dockerfile: Dockerfile.multi
# target: api-build
image: ghcr.io/danny-avila/librechat-dev-api:latest
# ports:
# - 3080:3080
depends_on:
- mongodb
- rag_api
networks:
- net
extra_hosts:
- "host.docker.internal:host-gateway"
env_file:
- stack.env
environment:
- HOST=0.0.0.0
- NODE_ENV=production
- MONGO_URI=mongodb://mongodb:27017/LibreChat
- MEILI_HOST=http://meilisearch:7700
- RAG_PORT=${RAG_PORT:-8000}
- RAG_API_URL=http://rag_api:${RAG_PORT:-8000}
volumes:
- type: bind
source: /home/trav/apps/LibreChat/librechat.yaml
target: /app/librechat.yaml
- /home/trav/apps/LibreChat/images:/app/client/public/images
- /home/trav/apps/LibreChat/uploads:/app/uploads
- /home/trav/apps/LibreChat/logs:/app/api/logs
deploy:
replicas: 1
restart_policy:
condition: any
delay: 15s
max_attempts: 10
window: 2m
update_config:
parallelism: 1
delay: 15s
failure_action: rollback
placement:
constraints:
- node.hostname == little
client:
image: nginx:1.27.0-alpine
# ports:
# - 80:80
# - 443:443
depends_on:
- api
networks:
- net
- badge-net
volumes:
- /home/trav/apps/LibreChat/client/nginx.conf:/etc/nginx/conf.d/default.conf
deploy:
replicas: 1
restart_policy:
condition: any
delay: 15s
max_attempts: 10
window: 2m
update_config:
parallelism: 1
delay: 15s
failure_action: rollback
placement:
constraints:
- node.hostname == little
mongodb:
# ports: # Uncomment this to access mongodb from outside docker, not safe in deployment
# - 27018:27017
image: mongo
networks:
- net
volumes:
- librechat-mongodb:/data/db
command: mongod --noauth
deploy:
replicas: 1
restart_policy:
condition: any
delay: 20s
max_attempts: 15
window: 3m
update_config:
parallelism: 1
delay: 20s
failure_action: rollback
placement:
constraints:
- node.hostname == little
meilisearch:
image: getmeili/meilisearch:v1.12.3
networks:
- net
# ports: # Uncomment this to access meilisearch from outside docker
# - 7700:7700 # if exposing these ports, make sure your master key is not the default value
env_file:
- stack.env
environment:
- MEILI_HOST=http://meilisearch:7700
- MEILI_NO_ANALYTICS=true
volumes:
- librechat-meili_data:/meili_data
deploy:
replicas: 1
restart_policy:
condition: any
delay: 20s
max_attempts: 15
window: 3m
update_config:
parallelism: 1
delay: 20s
failure_action: rollback
placement:
constraints:
- node.hostname == little
vectordb:
image: pgvector/pgvector:0.8.0-pg15-trixie
environment:
POSTGRES_DB: mydatabase
POSTGRES_USER: myuser
POSTGRES_PASSWORD: mypassword
networks:
- net
volumes:
- librechat-pgdata:/var/lib/postgresql/data
deploy:
replicas: 1
restart_policy:
condition: any
delay: 20s
max_attempts: 15
window: 3m
update_config:
parallelism: 1
delay: 20s
failure_action: rollback
placement:
constraints:
- node.hostname == little
rag_api:
image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:latest
environment:
- DB_HOST=vectordb
- RAG_PORT=${RAG_PORT:-8000}
networks:
- net
# depends_on:
# - vectordb
env_file:
- stack.env
deploy:
replicas: 1
restart_policy:
condition: any
delay: 20s
max_attempts: 15
window: 3m
update_config:
parallelism: 1
delay: 20s
failure_action: rollback
placement:
constraints:
- node.hostname == little
metrics:
image: ghcr.io/virtuos/librechat_exporter:main
# depends_on:
# - mongodb
# ports:
# - "8000:8000"
networks:
- net
- observability_observability
deploy:
replicas: 1
restart_policy:
condition: any
delay: 15s
max_attempts: 10
window: 2m
update_config:
parallelism: 1
delay: 15s
failure_action: rollback
placement:
constraints:
- node.hostname == little
volumes:
librechat-pgdata:
name: librechat-pgdata
librechat-mongodb:
name: librechat-mongodb
librechat-meili_data:
name: librechat-meili_data
networks:
net:
driver: overlay
attachable: true
badge-net:
external: true
observability_observability:
external: true

500
librechat.yaml Normal file
View File

@@ -0,0 +1,500 @@
# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml
# Configuration version (required)
version: 1.2.1
# Cache settings: Set to true to enable caching
cache: true
# File storage configuration
# Single strategy for all file types (legacy format, still supported)
# fileStrategy: "s3"
# Granular file storage strategies (new format - recommended)
# Allows different storage strategies for different file types
# fileStrategy:
# avatar: "s3" # Storage for user/agent avatar images
# image: "firebase" # Storage for uploaded images in chats
# document: "local" # Storage for document uploads (PDFs, text files, etc.)
# Available strategies: "local", "s3", "firebase"
# If not specified, defaults to "local" for all file types
# You can mix and match strategies based on your needs:
# - Use S3 for avatars for fast global access
# - Use Firebase for images with automatic optimization
# - Use local storage for documents for privacy/compliance
# Custom interface configuration
interface:
customWelcome: 'Welcome to LibreChat! Enjoy your experience.'
# Enable/disable file search as a chatarea selection (default: true)
# Note: This setting does not disable the Agents File Search Capability.
# To disable the Agents Capability, see the Agents Endpoint configuration instead.
fileSearch: true
# Privacy policy settings
privacyPolicy:
externalUrl: 'https://librechat.ai/privacy-policy'
openNewTab: true
# Terms of service
termsOfService:
externalUrl: 'https://librechat.ai/tos'
openNewTab: true
modalAcceptance: true
modalTitle: 'Terms of Service for LibreChat'
modalContent: |
# Terms and Conditions for LibreChat
*Effective Date: February 18, 2024*
Welcome to LibreChat, the informational website for the open-source AI chat platform, available at https://librechat.ai. These Terms of Service ("Terms") govern your use of our website and the services we offer. By accessing or using the Website, you agree to be bound by these Terms and our Privacy Policy, accessible at https://librechat.ai/privacy.
## 1. Ownership
Upon purchasing a package from LibreChat, you are granted the right to download and use the code for accessing an admin panel for LibreChat. While you own the downloaded code, you are expressly prohibited from reselling, redistributing, or otherwise transferring the code to third parties without explicit permission from LibreChat.
## 2. User Data
We collect personal data, such as your name, email address, and payment information, as described in our Privacy Policy. This information is collected to provide and improve our services, process transactions, and communicate with you.
## 3. Non-Personal Data Collection
The Website uses cookies to enhance user experience, analyze site usage, and facilitate certain functionalities. By using the Website, you consent to the use of cookies in accordance with our Privacy Policy.
## 4. Use of the Website
You agree to use the Website only for lawful purposes and in a manner that does not infringe the rights of, restrict, or inhibit anyone else's use and enjoyment of the Website. Prohibited behavior includes harassing or causing distress or inconvenience to any person, transmitting obscene or offensive content, or disrupting the normal flow of dialogue within the Website.
## 5. Governing Law
These Terms shall be governed by and construed in accordance with the laws of the United States, without giving effect to any principles of conflicts of law.
## 6. Changes to the Terms
We reserve the right to modify these Terms at any time. We will notify users of any changes by email. Your continued use of the Website after such changes have been notified will constitute your consent to such changes.
## 7. Contact Information
If you have any questions about these Terms, please contact us at contact@librechat.ai.
By using the Website, you acknowledge that you have read these Terms of Service and agree to be bound by them.
endpointsMenu: true
modelSelect: true
parameters: true
sidePanel: true
presets: true
prompts: true
bookmarks: true
multiConvo: true
agents: true
peoplePicker:
users: true
groups: true
roles: true
marketplace:
use: true
fileCitations: true
# Temporary chat retention period in hours (default: 720, min: 1, max: 8760)
# temporaryChatRetention: 1
# Example Cloudflare turnstile (optional)
#turnstile:
# siteKey: "your-site-key-here"
# options:
# language: "auto" # "auto" or an ISO 639-1 language code (e.g. en)
# size: "normal" # Options: "normal", "compact", "flexible", or "invisible"
# Example Registration Object Structure (optional)
registration:
socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple', 'saml']
# allowedDomains:
# - "gmail.com"
# Example Balance settings
# balance:
# enabled: false
# startBalance: 20000
# autoRefillEnabled: false
# refillIntervalValue: 30
# refillIntervalUnit: 'days'
# refillAmount: 10000
# Example Transactions settings
# Controls whether to save transaction records to the database
# Default is true (enabled)
#transactions:
# enabled: false
# Note: If balance.enabled is true, transactions will always be enabled
# regardless of this setting to ensure balance tracking works correctly
# speech:
# tts:
# openai:
# url: ''
# apiKey: '${TTS_API_KEY}'
# model: ''
# voices: ['']
#
# stt:
# openai:
# url: ''
# apiKey: '${STT_API_KEY}'
# model: ''
# rateLimits:
# fileUploads:
# ipMax: 100
# ipWindowInMinutes: 60 # Rate limit window for file uploads per IP
# userMax: 50
# userWindowInMinutes: 60 # Rate limit window for file uploads per user
# conversationsImport:
# ipMax: 100
# ipWindowInMinutes: 60 # Rate limit window for conversation imports per IP
# userMax: 50
# userWindowInMinutes: 60 # Rate limit window for conversation imports per user
# Example Actions Object Structure
actions:
allowedDomains:
- 'swapi.dev'
- 'librechat.ai'
- 'google.com'
- 'sidepiece.rip'
- 'baked.rocks'
- 'raindrop.com'
- 'raindrop.services'
# Example MCP Servers Object Structure
mcpServers:
pieces:
# type: sse # type can optionally be omitted
url: https://pieces-mcp.baked.rocks/mcp
timeout: 60000 # 1 minute timeout for this server, this is the default timeout for MCP servers.
headers:
mcp-session-id: "{{LIBRECHAT_BODY_CONVERSATIONID}}"
xpipe:
url: https://xpipe-mcp.baked.rocks/mcp
timeout: 60000 # 1 minute timeout for this server, this is the default timeout for MCP servers.
headers:
mcp-session-id: "{{LIBRECHAT_BODY_CONVERSATIONID}}"
# everything:
# # type: sse # type can optionally be omitted
# url: http://localhost:3001/sse
# timeout: 60000 # 1 minute timeout for this server, this is the default timeout for MCP servers.
# puppeteer:
# type: stdio
# command: npx
# args:
# - -y
# - "@modelcontextprotocol/server-puppeteer"
# timeout: 300000 # 5 minutes timeout for this server
# filesystem:
# # type: stdio
# command: npx
# args:
# - -y
# - "@modelcontextprotocol/server-filesystem"
# - /home/user/LibreChat/
# iconPath: /home/user/LibreChat/client/public/assets/logo.svg
# mcp-obsidian:
# command: npx
# args:
# - -y
# - "mcp-obsidian"
# - /path/to/obsidian/vault
# Definition of custom endpoints
endpoints:
assistants:
disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
pollIntervalMs: 3000 # Polling interval for checking assistant updates
timeoutMs: 180000 # Timeout for assistant operations
# # Should only be one or the other, either `supportedIds` or `excludedIds`
# supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
# # excludedIds: ["asst_excludedAssistantId"]
# # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
# # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
retrievalModels: ["openai/gpt-5.1"]
# # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
agents:
# # (optional) Default recursion depth for agents, defaults to 25
recursionLimit: 50
# # (optional) Max recursion depth for agents, defaults to 25
maxRecursionLimit: 100
# # (optional) Disable the builder interface for agents
disableBuilder: false
# # (optional) Maximum total citations to include in agent responses, defaults to 30
maxCitations: 30
# # (optional) Maximum citations per file to include in agent responses, defaults to 7
maxCitationsPerFile: 7
# # (optional) Minimum relevance score for sources to be included in responses, defaults to 0.45 (45% relevance threshold)
# # Set to 0.0 to show all sources (no filtering), or higher like 0.7 for stricter filtering
minRelevanceScore: 0.4
# # (optional) Agent Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
capabilities: ["execute_code", "file_search", "actions", "tools"]
custom:
# Groq Example
- name: 'bifrost'
# apiKey: '${GROQ_API_KEY}'
baseURL: 'http://bifrost.lab/v1'
headers:
x-bf-cache-key: "{{LIBRECHAT_BODY_CONVERSATIONID}}"
x-bf-cache-ttl: 30s
x-bf-cache-threshold: 0.9
models:
default:
- 'fireworks_ai/accounts/fireworks/models/qwen3-vl-235b-a22b-thinking'
- 'anthropic/claude-haiku-4-5-20251001'
- 'fireworks_ai/accounts/fireworks/models/minimax-m2'
- 'fireworks_ai/accounts/fireworks/models/deepseek-v3p2'
- 'gemini/gemini-3-pro-preview'
- 'gemini/gemini-2.5-flash-lite'
- 'gemini/gemini-2.5-flash'
- 'openai/gpt-5.1'
fetch: true
titleConvo: true
titleModel: 'fireworks_ai/accounts/fireworks/models/qwen3-30b-a3b'
modelDisplayLabel: 'LLM'
# Summarize setting: Set to true to enable summarization.
summarize: true
# Summary Model: Specify the model to use if summarization is enabled.
summaryModel: "fireworks_ai/accounts/fireworks/models/qwen3-30b-a3b" # Defaults to "gpt-3.5-turbo" if omitted.
# Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
forcePrompt: false
dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
# - name: 'groq'
# apiKey: '${GROQ_API_KEY}'
# baseURL: 'https://api.groq.com/openai/v1/'
# models:
# default:
# - 'llama3-70b-8192'
# - 'llama3-8b-8192'
# - 'llama2-70b-4096'
# - 'mixtral-8x7b-32768'
# - 'gemma-7b-it'
# fetch: false
# titleConvo: true
# titleModel: 'mixtral-8x7b-32768'
# modelDisplayLabel: 'groq'
# # Mistral AI Example
# - name: 'Mistral' # Unique name for the endpoint
# # For `apiKey` and `baseURL`, you can use environment variables that you define.
# # recommended environment variables:
# apiKey: '${MISTRAL_API_KEY}'
# baseURL: 'https://api.mistral.ai/v1'
# # Models configuration
# models:
# # List of default models to use. At least one value is required.
# default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
# # Fetch option: Set to true to fetch models from API.
# fetch: true # Defaults to false.
# # Optional configurations
# # Title Conversation setting
# titleConvo: true # Set to true to enable title conversation
# # Title Method: Choose between "completion" or "functions".
# # titleMethod: "completion" # Defaults to "completion" if omitted.
# # Title Model: Specify the model to use for titles.
# titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.
# # Summarize setting: Set to true to enable summarization.
# # summarize: false
# # Summary Model: Specify the model to use if summarization is enabled.
# # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
# # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
# # forcePrompt: false
# # The label displayed for the AI model in messages.
# modelDisplayLabel: 'Mistral' # Default is "AI" when not set.
# # Add additional parameters to the request. Default params will be overwritten.
# # addParams:
# # safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
# # Drop Default params parameters from the request. See default params in guide linked below.
# # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
# dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
# # OpenRouter Example
# - name: 'OpenRouter'
# # For `apiKey` and `baseURL`, you can use environment variables that you define.
# # recommended environment variables:
# apiKey: '${OPENROUTER_KEY}'
# baseURL: 'https://openrouter.ai/api/v1'
# headers:
# x-librechat-body-parentmessageid: '{{LIBRECHAT_BODY_PARENTMESSAGEID}}'
# models:
# default: ['meta-llama/llama-3-70b-instruct']
# fetch: true
# titleConvo: true
# titleModel: 'meta-llama/llama-3-70b-instruct'
# # Recommended: Drop the stop parameter from the request as Openrouter models use a variety of stop tokens.
# dropParams: ['stop']
# modelDisplayLabel: 'OpenRouter'
# # Helicone Example
# - name: 'Helicone'
# # For `apiKey` and `baseURL`, you can use environment variables that you define.
# # recommended environment variables:
# apiKey: '${HELICONE_KEY}'
# baseURL: 'https://ai-gateway.helicone.ai'
# headers:
# x-librechat-body-parentmessageid: '{{LIBRECHAT_BODY_PARENTMESSAGEID}}'
# models:
# default: ['gpt-4o-mini', 'claude-4.5-sonnet', 'llama-3.1-8b-instruct', 'gemini-2.5-flash-lite']
# fetch: true
# titleConvo: true
# titleModel: 'gpt-4o-mini'
# modelDisplayLabel: 'Helicone'
# iconURL: https://marketing-assets-helicone.s3.us-west-2.amazonaws.com/helicone.png
# # Portkey AI Example
# - name: 'Portkey'
# apiKey: 'dummy'
# baseURL: 'https://api.portkey.ai/v1'
# headers:
# x-portkey-api-key: '${PORTKEY_API_KEY}'
# x-portkey-virtual-key: '${PORTKEY_OPENAI_VIRTUAL_KEY}'
# models:
# default: ['gpt-4o-mini', 'gpt-4o', 'chatgpt-4o-latest']
# fetch: true
# titleConvo: true
# titleModel: 'current_model'
# summarize: false
# summaryModel: 'current_model'
# forcePrompt: false
# modelDisplayLabel: 'Portkey'
# iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf
# Example modelSpecs configuration showing grouping options
# The 'group' field organizes model specs in the UI selector:
# - If 'group' matches an endpoint name (e.g., "openAI", "groq"), the spec appears nested under that endpoint
# - If 'group' is a custom name (doesn't match any endpoint), it creates a separate collapsible section
# - If 'group' is omitted, the spec appears as a standalone item at the top level
# modelSpecs:
# list:
# # Example 1: Nested under an endpoint (grouped with openAI endpoint)
# - name: "gpt-4o"
# label: "GPT-4 Optimized"
# description: "Most capable GPT-4 model with multimodal support"
# group: "openAI" # String value matching the endpoint name
# preset:
# endpoint: "openAI"
# model: "gpt-4o"
#
# # Example 2: Nested under a custom endpoint (grouped with groq endpoint)
# - name: "llama3-70b-8192"
# label: "Llama 3 70B"
# description: "Fastest inference available - great for quick responses"
# group: "groq" # String value matching your custom endpoint name from endpoints.custom
# preset:
# endpoint: "groq"
# model: "llama3-70b-8192"
#
# # Example 3: Custom group (creates a separate collapsible section)
# - name: "coding-assistant"
# label: "Coding Assistant"
# description: "Specialized for coding tasks"
# group: "my-assistants" # Custom string - doesn't match any endpoint, so creates its own group
# preset:
# endpoint: "openAI"
# model: "gpt-4o"
# instructions: "You are an expert coding assistant..."
# temperature: 0.3
#
# - name: "writing-assistant"
# label: "Writing Assistant"
# description: "Specialized for creative writing"
# group: "my-assistants" # Same custom group name - both specs appear in same section
# preset:
# endpoint: "anthropic"
# model: "claude-sonnet-4"
# instructions: "You are a creative writing expert..."
#
# # Example 4: Standalone (no group - appears at top level)
# - name: "general-assistant"
# label: "General Assistant"
# description: "General purpose assistant"
# # No 'group' field - appears as standalone item at top level (not nested)
# preset:
# endpoint: "openAI"
# model: "gpt-4o-mini"
# fileConfig:
# endpoints:
# assistants:
# fileLimit: 5
# fileSizeLimit: 10 # Maximum size for an individual file in MB
# totalSizeLimit: 50 # Maximum total size for all files in a single request in MB
# supportedMimeTypes:
# - "image/.*"
# - "application/pdf"
# openAI:
# disabled: true # Disables file uploading to the OpenAI endpoint
# default:
# totalSizeLimit: 20
# YourCustomEndpointName:
# fileLimit: 2
# fileSizeLimit: 5
# serverFileSizeLimit: 100 # Global server file size limit in MB
# avatarSizeLimit: 2 # Limit for user avatar image size in MB
# imageGeneration: # Image Gen settings, either percentage or px
# percentage: 100
# px: 1024
# # Client-side image resizing to prevent upload errors
# clientImageResize:
# enabled: false # Enable/disable client-side image resizing (default: false)
# maxWidth: 1900 # Maximum width for resized images (default: 1900)
# maxHeight: 1900 # Maximum height for resized images (default: 1900)
# quality: 0.92 # JPEG quality for compression (0.0-1.0, default: 0.92)
# # See the Custom Configuration Guide for more information on Assistants Config:
# # https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint
# Web Search Configuration (optional)
webSearch:
# # Jina Reranking Configuration
# jinaApiKey: '${JINA_API_KEY}' # Your Jina API key
# jinaApiUrl: '${JINA_API_URL}' # Custom Jina API URL (optional, defaults to https://api.jina.ai/v1/rerank)
# # Other rerankers
# cohereApiKey: '${COHERE_API_KEY}'
# # Search providers
# serperApiKey: '${SERPER_API_KEY}'
# searxngInstanceUrl: '${SEARXNG_INSTANCE_URL}'
# searxngApiKey: '${SEARXNG_API_KEY}'
# # Content scrapers
firecrawlApiKey: 'dummy-key'
firecrawlApiUrl: 'http://crawl.lab'
# Memory configuration for user memories
memory:
# (optional) Disable memory functionality
disabled: false
# (optional) Restrict memory keys to specific values to limit memory storage and improve consistency
validKeys: ["preferences", "work_info", "personal_info", "skills", "interests", "context"]
# (optional) Maximum token limit for memory storage (not yet implemented for token counting)
tokenLimit: 10000
# (optional) Enable personalization features (defaults to true if memory is configured)
# When false, users will not see the Personalization tab in settings
personalize: true
# Memory agent configuration - either use an existing agent by ID or define inline
agent:
# Option 1: Use existing agent by ID
# id: "your-memory-agent-id"
# Option 2: Define agent inline
provider: "openai"
model: "gemini/gemini-flash-lite"
instructions: "You are a memory management assistant. Store and manage user information accurately."
# model_parameters:
# temperature: 0.1

792
stack.env Normal file
View File

@@ -0,0 +1,792 @@
#=====================================================================#
# LibreChat Configuration #
#=====================================================================#
# Please refer to the reference documentation for assistance #
# with configuring your LibreChat environment. #
# #
# https://www.librechat.ai/docs/configuration/dotenv #
#=====================================================================#
#==================================================#
# Server Configuration #
#==================================================#
HOST=localhost
PORT=3080
MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
#The maximum number of connections in the connection pool. */
MONGO_MAX_POOL_SIZE=
#The minimum number of connections in the connection pool. */
MONGO_MIN_POOL_SIZE=
#The maximum number of connections that may be in the process of being established concurrently by the connection pool. */
MONGO_MAX_CONNECTING=
#The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed. */
MONGO_MAX_IDLE_TIME_MS=
#The maximum time in milliseconds that a thread can wait for a connection to become available. */
MONGO_WAIT_QUEUE_TIMEOUT_MS=
# Set to false to disable automatic index creation for all models associated with this connection. */
MONGO_AUTO_INDEX=
# Set to `false` to disable Mongoose automatically calling `createCollection()` on every model created on this connection. */
MONGO_AUTO_CREATE=
DOMAIN_CLIENT=https://chat.baked.rocks
DOMAIN_SERVER=https://chat.baked.rocks
NO_INDEX=true
# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
# Defaulted to 1.
TRUST_PROXY=1
# Minimum password length for user authentication
# Default: 8
# Note: When using LDAP authentication, you may want to set this to 1
# to bypass local password validation, as LDAP servers handle their own
# password policies.
# MIN_PASSWORD_LENGTH=8
#===============#
# JSON Logging #
#===============#
# Use when process console logs in cloud deployment like GCP/AWS
CONSOLE_JSON=false
#===============#
# Debug Logging #
#===============#
DEBUG_LOGGING=true
DEBUG_CONSOLE=false
#=============#
# Permissions #
#=============#
# UID=1000
# GID=1000
#===============#
# Configuration #
#===============#
# Use an absolute path, a relative path, or a URL
CONFIG_PATH="/home/trav/apps/LibreChat/librechat.yaml"
#===================================================#
# Endpoints #
#===================================================#
ENDPOINTS=openAI,assistants,google,anthropic,custom,fireworks,openrouter,deepseek,perplexity,groq,cohere,mistral
PROXY=
#===================================#
# Known Endpoints - librechat.yaml #
#===================================#
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints
# ANYSCALE_API_KEY=
# APIPIE_API_KEY=
COHERE_API_KEY=user_provided
DEEPSEEK_API_KEY=user_provided
# DATABRICKS_API_KEY=
FIREWORKS_API_KEY=user_provided
GROQ_API_KEY=user_provided
# HUGGINGFACE_TOKEN=
MISTRAL_API_KEY=user_provided
OPENROUTER_KEY=user_provided
PERPLEXITY_API_KEY=user_provided
# SHUTTLEAI_API_KEY=
# TOGETHERAI_API_KEY=
# UNIFY_API_KEY=
# XAI_API_KEY=
#============#
# Anthropic #
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
# ANTHROPIC_REVERSE_PROXY=
#============#
# Azure #
#============#
# Note: these variables are DEPRECATED
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
# AZURE_API_KEY= # Deprecated
# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_VERSION= # Deprecated
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
# PLUGINS_USE_AZURE="true" # Deprecated
#=================#
# AWS Bedrock #
#=================#
# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
# Notes on specific models:
# The following models are not support due to not supporting streaming:
# ai21.j2-mid-v1
# The following models are not support due to not supporting conversation history:
# ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14
#============#
# Google #
#============#
GOOGLE_KEY=user_provided
# GOOGLE_REVERSE_PROXY=
# Some reverse proxies do not support the X-goog-api-key header, uncomment to pass the API key in Authorization header instead.
# GOOGLE_AUTH_HEADER=true
# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
# Vertex AI
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
# GOOGLE_LOC=us-central1
# Google Safety Settings
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
#
# For Vertex AI:
# To use the BLOCK_NONE setting, you need either:
# (a) Access through an allowlist via your Google account team, or
# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing
#
# For Gemini API (AI Studio):
# BLOCK_NONE is available by default, no special account requirements.
#
# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
#
# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
#============#
# OpenAI #
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=gpt-5,gpt-5-codex,gpt-5-mini,gpt-5-nano,o3-pro,o3,o4-mini,gpt-4.1,gpt-4.1-mini,gpt-4.1-nano,o3-mini,o1-pro,o1,gpt-4o,gpt-4o-mini
DEBUG_OPENAI=false
# TITLE_CONVO=false
# OPENAI_TITLE_MODEL=gpt-4o-mini
# OPENAI_SUMMARIZE=true
# OPENAI_SUMMARY_MODEL=gpt-4o-mini
# OPENAI_FORCE_PROMPT=true
# OPENAI_REVERSE_PROXY=
# OPENAI_ORGANIZATION=
#====================#
# Assistants API #
#====================#
ASSISTANTS_API_KEY=user_provided
# ASSISTANTS_BASE_URL=
# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
#==========================#
# Azure Assistants API #
#==========================#
# Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration
# The models for Azure Assistants are also determined by your Azure OpenAI configuration.
# More info, including how to enable use of Assistants with Azure here:
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
#============#
# Plugins #
#============#
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
DEBUG_PLUGINS=true
# SECURITY: these are the publicly documented LibreChat example values — generate unique
# CREDS_KEY/CREDS_IV (see the LibreChat credentials generator) before deploying.
CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
# Azure AI Search
#-----------------
AZURE_AI_SEARCH_SERVICE_ENDPOINT=
AZURE_AI_SEARCH_INDEX_NAME=
AZURE_AI_SEARCH_API_KEY=
AZURE_AI_SEARCH_API_VERSION=
AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE=
AZURE_AI_SEARCH_SEARCH_OPTION_TOP=
AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# OpenAI Image Tools Customization
#----------------
# SECURITY: a plaintext OpenAI API key was committed on this line — revoke that key
# immediately and supply a replacement via a secret store, not version control.
IMAGE_GEN_OAI_API_KEY=
# IMAGE_GEN_OAI_BASEURL= # Custom OpenAI base URL for image generation tool
# IMAGE_GEN_OAI_AZURE_API_VERSION= # Custom Azure OpenAI deployments
# IMAGE_GEN_OAI_DESCRIPTION=
# IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present
# IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present
# IMAGE_EDIT_OAI_DESCRIPTION=Custom description for image editing tool
# IMAGE_GEN_OAI_PROMPT_DESCRIPTION=Custom prompt description for image generation tool
# IMAGE_EDIT_OAI_PROMPT_DESCRIPTION=Custom prompt description for image editing tool
# DALL·E
#----------------
# DALLE_API_KEY=
# DALLE3_API_KEY=
# DALLE2_API_KEY=
# DALLE3_SYSTEM_PROMPT=
# DALLE2_SYSTEM_PROMPT=
# DALLE_REVERSE_PROXY=
# DALLE3_BASEURL=
# DALLE2_BASEURL=
# DALL·E (via Azure OpenAI)
# Note: requires some of the variables above to be set
#----------------
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
# Flux
#-----------------
FLUX_API_BASE_URL=https://api.us1.bfl.ai
# FLUX_API_BASE_URL=https://api.bfl.ml
# Get your API key at https://api.us1.bfl.ai/auth/profile
# FLUX_API_KEY=
# Google
#-----------------
GOOGLE_SEARCH_API_KEY=
GOOGLE_CSE_ID=
# YOUTUBE
#-----------------
YOUTUBE_API_KEY=
# Stable Diffusion
#-----------------
SD_WEBUI_URL=http://host.docker.internal:7860
# Tavily
#-----------------
TAVILY_API_KEY=
# Traversaal
#-----------------
TRAVERSAAL_API_KEY=
# WolframAlpha
#-----------------
WOLFRAM_APP_ID=
# Zapier
#-----------------
ZAPIER_NLA_API_KEY=
#==================================================#
# Search #
#==================================================#
SEARCH=true
MEILI_NO_ANALYTICS=true
MEILI_HOST=http://0.0.0.0:7700
MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
# Optional: Disable indexing, useful in a multi-node setup
# where only one instance should perform an index sync.
# MEILI_NO_SYNC=true
#==================================================#
# Speech to Text & Text to Speech #
#==================================================#
STT_API_KEY=
TTS_API_KEY=
#==================================================#
# RAG #
#==================================================#
# More info: https://www.librechat.ai/docs/configuration/rag_api
RAG_API_URL=http://host.docker.internal:8000
# RAG_OPENAI_BASEURL=
RAG_OPENAI_API_KEY=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
# RAG_USE_FULL_CONTEXT=
EMBEDDINGS_PROVIDER=openai
EMBEDDINGS_MODEL=text-embedding-3-small
#===================================================#
# User System #
#===================================================#
#========================#
# Moderation #
#========================#
OPENAI_MODERATION=false
OPENAI_MODERATION_API_KEY=
# OPENAI_MODERATION_REVERSE_PROXY=
BAN_VIOLATIONS=true
BAN_DURATION=1000 * 60 * 60 * 2
BAN_INTERVAL=20
LOGIN_VIOLATION_SCORE=1
REGISTRATION_VIOLATION_SCORE=1
CONCURRENT_VIOLATION_SCORE=1
MESSAGE_VIOLATION_SCORE=1
NON_BROWSER_VIOLATION_SCORE=20
TTS_VIOLATION_SCORE=0
STT_VIOLATION_SCORE=0
FORK_VIOLATION_SCORE=0
IMPORT_VIOLATION_SCORE=0
FILE_UPLOAD_VIOLATION_SCORE=0
LOGIN_MAX=7
LOGIN_WINDOW=5
REGISTER_MAX=5
REGISTER_WINDOW=60
LIMIT_CONCURRENT_MESSAGES=true
CONCURRENT_MESSAGE_MAX=2
LIMIT_MESSAGE_IP=true
MESSAGE_IP_MAX=40
MESSAGE_IP_WINDOW=1
LIMIT_MESSAGE_USER=false
MESSAGE_USER_MAX=40
MESSAGE_USER_WINDOW=1
ILLEGAL_MODEL_REQ_SCORE=5
#========================#
# Balance #
#========================#
# CHECK_BALANCE=false
# START_BALANCE=20000 # note: the number of tokens that will be credited after registration.
#========================#
# Registration and Login #
#========================#
ALLOW_EMAIL_LOGIN=true
ALLOW_REGISTRATION=true
ALLOW_SOCIAL_LOGIN=true
ALLOW_SOCIAL_REGISTRATION=true
ALLOW_PASSWORD_RESET=false
# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out
ALLOW_UNVERIFIED_EMAIL_LOGIN=true
SESSION_EXPIRY=1000 * 60 * 15
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
JWT_SECRET=16f8c0ef4a5d391b26034086c628469d3f9f497f08163ab9b40137092f2909ef
JWT_REFRESH_SECRET=eaa5191f2914e30b9387fd84e254e4ba6fc51b4654968a9b0803b456a54b8418
# Discord
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_CALLBACK_URL=/oauth/discord/callback
# Facebook
FACEBOOK_CLIENT_ID=
FACEBOOK_CLIENT_SECRET=
FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
# GitHub
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
GITHUB_CALLBACK_URL=/oauth/github/callback
# GitHub Enterprise
# GITHUB_ENTERPRISE_BASE_URL=
# GITHUB_ENTERPRISE_USER_AGENT=
# Google
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
GOOGLE_CALLBACK_URL=/oauth/google/callback
# Apple
APPLE_CLIENT_ID=
APPLE_TEAM_ID=
APPLE_KEY_ID=
APPLE_PRIVATE_KEY_PATH=
APPLE_CALLBACK_URL=/oauth/apple/callback
# OpenID
OPENID_CLIENT_ID=WAKWgswi861g5ffBXaUTvFwSKr0PbtjiAadXthOR
OPENID_CLIENT_SECRET=fF1p5Le8bcyyag0Itwn91ZydlxwSnqCN2de1pudASxIA4c8phsYGztXdGUjCQes9TGS20YfkmhaP6OabsZY4CptsFGj47RhgjgfowyPljsblrOyJ6yQv8MQsk7p24qpg
OPENID_ISSUER=https://auth.baked.rocks/application/o/librechat/.well-known/openid-configuration
OPENID_SESSION_SECRET=5685643423f66ee9ad0c743b45c0caaee6c3377463a12c74dc9da2cb1cb19d0f
OPENID_SCOPE="openid profile email"
OPENID_CALLBACK_URL=/oauth/openid/callback
OPENID_REQUIRED_ROLE=
OPENID_REQUIRED_ROLE_TOKEN_KIND=
OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_ADMIN_ROLE=
OPENID_ADMIN_ROLE_PARAMETER_PATH=
OPENID_ADMIN_ROLE_TOKEN_KIND=
# Set to determine which user info property returned from OpenID Provider to store as the User's username
OPENID_USERNAME_CLAIM=
# Set to determine which user info property returned from OpenID Provider to store as the User's name
OPENID_NAME_CLAIM=
# Optional audience parameter for OpenID authorization requests
OPENID_AUDIENCE=
OPENID_GENERATE_NONCE=true
OPENID_USE_END_SESSION_ENDPOINT=true
OPENID_BUTTON_LABEL=Login with Magick
OPENID_IMAGE_URL=https://cdn.jsdelivr.net/gh/selfhst/icons/png/authentik.png
# Set to true to automatically redirect to the OpenID provider when a user visits the login page
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
OPENID_AUTO_REDIRECT=false
# Set to true to use PKCE (Proof Key for Code Exchange) for OpenID authentication
OPENID_USE_PKCE=false
#Set to true to reuse openid tokens for authentication management instead of using the mongodb session and the custom refresh token.
OPENID_REUSE_TOKENS=
#By default, signing key verification results are cached in order to prevent excessive HTTP requests to the JWKS endpoint.
#If a signing key matching the kid is found, this will be cached and the next time this kid is requested the signing key will be served from the cache.
#Default is true.
OPENID_JWKS_URL_CACHE_ENABLED=
OPENID_JWKS_URL_CACHE_TIME= # 600000 ms eq to 10 minutes leave empty to disable caching
#Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE="user.read" # example for Scope Needed for Microsoft Graph API
# Set to true to use the OpenID Connect end session endpoint for logout
# NOTE(review): duplicate of OPENID_USE_END_SESSION_ENDPOINT (set to true earlier in this file);
# with last-wins dotenv parsing this empty value would override it, so it is commented out.
# OPENID_USE_END_SESSION_ENDPOINT=
#========================#
# SharePoint Integration #
#========================#
# Requires Entra ID (OpenID) authentication to be configured
# Enable SharePoint file picker in chat and agent panels
# ENABLE_SHAREPOINT_FILEPICKER=true
# SharePoint tenant base URL (e.g., https://yourtenant.sharepoint.com)
# SHAREPOINT_BASE_URL=https://yourtenant.sharepoint.com
# Microsoft Graph API And SharePoint scopes for file picker
# SHAREPOINT_PICKER_SHAREPOINT_SCOPE=https://yourtenant.sharepoint.com/AllSites.Read
# SHAREPOINT_PICKER_GRAPH_SCOPE=Files.Read.All
#========================#
# SAML
# Note: If OpenID is enabled, SAML authentication will be automatically disabled.
SAML_ENTRY_POINT=
SAML_ISSUER=
SAML_CERT=
SAML_CALLBACK_URL=/oauth/saml/callback
SAML_SESSION_SECRET=
# Attribute mappings (optional)
SAML_EMAIL_CLAIM=
SAML_USERNAME_CLAIM=
SAML_GIVEN_NAME_CLAIM=
SAML_FAMILY_NAME_CLAIM=
SAML_PICTURE_CLAIM=
SAML_NAME_CLAIM=
# Login button settings (optional)
SAML_BUTTON_LABEL=
SAML_IMAGE_URL=
# Whether the SAML Response should be signed.
# - If "true", the entire `SAML Response` will be signed.
# - If "false" or unset, only the `SAML Assertion` will be signed (default behavior).
# SAML_USE_AUTHN_RESPONSE_SIGNED=
#===============================================#
# Microsoft Graph API / Entra ID Integration #
#===============================================#
# Enable Entra ID people search integration in permissions/sharing system
# When enabled, the people picker will search both local database and Entra ID
USE_ENTRA_ID_FOR_PEOPLE_SEARCH=false
# When enabled, entra id groups owners will be considered as members of the group
ENTRA_ID_INCLUDE_OWNERS_AS_MEMBERS=false
# Microsoft Graph API scopes needed for people/group search
# Default scopes provide access to user profiles and group memberships
OPENID_GRAPH_SCOPES=User.Read,People.Read,GroupMember.Read.All
# LDAP
LDAP_URL=
LDAP_BIND_DN=
LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
#LDAP_SEARCH_FILTER="mail="
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
# LDAP_STARTTLS=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
# LDAP_EMAIL=
# LDAP_FULL_NAME=
#========================#
# Email Password Reset #
#========================#
EMAIL_SERVICE=
EMAIL_HOST=
EMAIL_PORT=25
EMAIL_ENCRYPTION=
EMAIL_ENCRYPTION_HOSTNAME=
EMAIL_ALLOW_SELFSIGNED=
EMAIL_USERNAME=
EMAIL_PASSWORD=
# NOTE(review): EMAIL_FROM_NAME and EMAIL_FROM are redefined in the Mailgun section below;
# under last-wins dotenv parsing those later values take effect, so these shadowed
# duplicates are commented out to avoid duplicate-key ambiguity.
# EMAIL_FROM_NAME=
# EMAIL_FROM=noreply@librechat.ai
#========================#
# Mailgun API #
#========================#
MAILGUN_API_KEY=59f23cd3d50b1d1bc7906ad86f1e3a63-51afd2db-044a050d
MAILGUN_DOMAIN=thingswithstuff.io
EMAIL_FROM=tinkerer@thingswithstuff.io
EMAIL_FROM_NAME="LibreChat"
# # Optional: For EU region
# MAILGUN_HOST=https://api.eu.mailgun.net
#========================#
# Firebase CDN #
#========================#
FIREBASE_API_KEY=
FIREBASE_AUTH_DOMAIN=
FIREBASE_PROJECT_ID=
FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
#========================#
# S3 AWS Bucket #
#========================#
AWS_ENDPOINT_URL=https://io.baked.rocks
AWS_ACCESS_KEY_ID=root
AWS_SECRET_ACCESS_KEY=squirtle123
AWS_REGION=us-east-1
AWS_BUCKET_NAME=librechat
#========================#
# Azure Blob Storage #
#========================#
AZURE_STORAGE_CONNECTION_STRING=
AZURE_STORAGE_PUBLIC_ACCESS=false
AZURE_CONTAINER_NAME=files
#========================#
# Shared Links #
#========================#
ALLOW_SHARED_LINKS=true
ALLOW_SHARED_LINKS_PUBLIC=true
#==============================#
# Static File Cache Control #
#==============================#
# Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age
# NODE_ENV must be set to production for these to take effect
# STATIC_CACHE_MAX_AGE=172800
# STATIC_CACHE_S_MAX_AGE=86400
# If you have another service in front of your LibreChat doing compression, disable express based compression here
# DISABLE_COMPRESSION=true
# If you have gzipped versions of uploaded images in the same folder, this will enable gzip scanning and serving of these images
# Note: The images folder will be scanned on startup and a map kept in memory. Be careful with a large number of images.
# ENABLE_IMAGE_OUTPUT_GZIP_SCAN=true
#===================================================#
# UI #
#===================================================#
APP_TITLE=LibreChat
# CUSTOM_FOOTER="My custom footer"
HELP_AND_FAQ_URL=https://librechat.ai
# SHOW_BIRTHDAY_ICON=true
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id
# limit conversation file imports to a certain number of bytes in size to avoid the container
# maxing out memory limitations by unremarking this line and supplying a file size in bytes
# such as the below example of 250 mib
# CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES=262144000
#===============#
# REDIS Options #
#===============#
# Enable Redis for caching and session storage
USE_REDIS=true
# Single Redis instance
REDIS_URI=redis://192.168.50.210:6379/3
# Redis cluster (multiple nodes)
# REDIS_URI=redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003
# Redis with TLS/SSL encryption and CA certificate
# REDIS_URI=rediss://127.0.0.1:6380
# REDIS_CA=/path/to/ca-cert.pem
# Elasticache may need to use an alternate dnsLookup for TLS connections. see "Special Note: Aws Elasticache Clusters with TLS" on this webpage: https://www.npmjs.com/package/ioredis
# Enable alternative dnsLookup for redis
# REDIS_USE_ALTERNATIVE_DNS_LOOKUP=true
# Redis authentication (if required)
# REDIS_USERNAME=your_redis_username
# REDIS_PASSWORD=your_redis_password
# Redis key prefix configuration
# Use environment variable name for dynamic prefix (recommended for cloud deployments)
# REDIS_KEY_PREFIX_VAR=K_REVISION
# Or use static prefix directly
# REDIS_KEY_PREFIX=librechat
# Redis connection limits
# REDIS_MAX_LISTENERS=40
# Redis ping interval in seconds (0 = disabled, >0 = enabled)
# When set to a positive integer, Redis clients will ping the server at this interval to keep connections alive
# When unset or 0, no pinging is performed (recommended for most use cases)
# REDIS_PING_INTERVAL=300
# Force specific cache namespaces to use in-memory storage even when Redis is enabled
# Comma-separated list of CacheKeys (e.g., ROLES,MESSAGES)
# FORCED_IN_MEMORY_CACHE_NAMESPACES=ROLES,MESSAGES
# Leader Election Configuration (for multi-instance deployments with Redis)
# Duration in seconds that the leader lease is valid before it expires (default: 25)
# LEADER_LEASE_DURATION=25
# Interval in seconds at which the leader renews its lease (default: 10)
# LEADER_RENEW_INTERVAL=10
# Maximum number of retry attempts when renewing the lease fails (default: 3)
# LEADER_RENEW_ATTEMPTS=3
# Delay in seconds between retry attempts when renewing the lease (default: 0.5)
# LEADER_RENEW_RETRY_DELAY=0.5
#==================================================#
# Others #
#==================================================#
# You should leave the following commented out #
# NODE_ENV=
# E2E_USER_EMAIL=
# E2E_USER_PASSWORD=
#=====================================================#
# Cache Headers #
#=====================================================#
# Headers that control caching of the index.html #
# Default configuration prevents caching to ensure #
# users always get the latest version. Customize #
# only if you understand caching implications. #
# INDEX_CACHE_CONTROL=no-cache, no-store, must-revalidate
# INDEX_PRAGMA=no-cache
# INDEX_EXPIRES=0
# no-cache: Forces validation with server before using cached version
# no-store: Prevents storing the response entirely
# must-revalidate: Prevents using stale content when offline
#=====================================================#
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=abac030ace40794a6b3afcd59faf676e
#====================================#
# LibreChat Code Interpreter API #
#====================================#
# https://code.librechat.ai
# LIBRECHAT_CODE_API_KEY=your-key
#======================#
# Web Search #
#======================#
# Note: All of the following variable names can be customized.
# Omit values to allow user to provide them.
# For more information on configuration values, see:
# https://librechat.ai/docs/features/web_search
# Search Provider (Required)
# SERPER_API_KEY=your_serper_api_key
# Scraper (Required)
# FIRECRAWL_API_KEY=your_firecrawl_api_key
# Optional: Custom Firecrawl API URL
# FIRECRAWL_API_URL=your_firecrawl_api_url
# Reranker (Required)
# JINA_API_KEY=your_jina_api_key
# or
COHERE_API_KEY=Zx9TS3woEdUrFNpJv7ysM7yN3Bm85Wpq0KTdCUKp
#======================#
# MCP Configuration #
#======================#
# Treat 401/403 responses as OAuth requirement when no oauth metadata found
# MCP_OAUTH_ON_AUTH_ERROR=true
# Timeout for OAuth detection requests in milliseconds
# MCP_OAUTH_DETECTION_TIMEOUT=5000
# Cache connection status checks for this many milliseconds to avoid expensive verification
# MCP_CONNECTION_CHECK_TTL=60000
# Skip code challenge method validation (e.g., for AWS Cognito that supports S256 but doesn't advertise it)
# When set to true, forces S256 code challenge even if not advertised in .well-known/openid-configuration
# MCP_SKIP_CODE_CHALLENGE_CHECK=false