diff --git a/docker-compose.yml b/docker-compose.yml
index 9a3e4bbd5..42dad5b7e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -69,4 +69,4 @@ services:
       - .env
 
 volumes:
-  pgdata2:
+  pgdata2:
\ No newline at end of file
diff --git a/librechat.yaml b/librechat.yaml
index e039a2077..c13e2fa05 100644
--- a/librechat.yaml
+++ b/librechat.yaml
@@ -203,7 +203,7 @@ mcpServers:
       - firecrawl-mcp
     env:
       FIRECRAWL_API_KEY: dummy-key
-      FIRECRAWL_API_URL: http://crawl.lab
+      FIRECRAWL_API_URL: http://crawl.toy
   context7:
     type: "streamable-http"
     url: https://mcp.context7.com/mcp
@@ -281,19 +281,18 @@ endpoints:
     # Groq Example
     - name: 'litellm'
       apiKey: 'sk-1234'
-      baseURL: 'http://llm.lab'
+      baseURL: 'http://llm.toy'
       models:
         default:
-          - 'qwen3-vl-235b-a22b-thinking'
-          - 'qwen3-vl-235b-a22b-instruct'
+          - 'fireworks_ai/deepseek-v3p2'
         fetch: true
       titleConvo: true
-      titleModel: 'qwen3-30b-a3b-instruct'
+      titleModel: 'fireworks_ai/qwen3-vl-30b-a3b-instruct'
       modelDisplayLabel: 'LLM'
       # Summarize setting: Set to true to enable summarization.
       summarize: true
       # Summary Model: Specify the model to use if summarization is enabled.
-      summaryModel: "qwen3-30b-a3b-instruct" # Defaults to "gpt-3.5-turbo" if omitted.
+      summaryModel: "fireworks_ai/qwen3-vl-30b-a3b-instruct" # Defaults to "gpt-3.5-turbo" if omitted.
       # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
       forcePrompt: false
       dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
@@ -543,7 +542,7 @@ memory:
     # id: "your-memory-agent-id"
     # Option 2: Define agent inline
     provider: "litellm"
-    model: "qwen3-vl-30b-a3b-instruct"
+    model: "fireworks_ai/qwen3-vl-30b-a3b-instruct"
     instructions: "You are a memory management assistant. Store and manage user information accurately and do not embellish the information."
     # model_parameters:
     #   temperature: 0.1