Compare commits


15 Commits

Author · SHA1 · Message · Date
Ruben Talstra · 4914ef5226 · 👐 refactor: Fix formatting in userSchema.js · 2025-02-22 14:31:07 +01:00
Ruben Talstra · b4b574e328 · 📦 chore: Update package-lock.json with new dependencies and version upgrades · 2025-02-22 14:28:53 +01:00
Ruben Talstra · 8173f5fca1 · Merge branch 'main' into feat/webauthn · 2025-02-22 14:28:12 +01:00
Ruben Talstra · 6496c9aeda · fix: merge conflict · 2025-02-22 14:25:26 +01:00
Ruben Talstra · dfcbc23b8b · Merge branch 'main' into feat/webauthn · 2025-02-13 14:06:45 +01:00
Ruben Talstra · 47a5b0a4d6 · Test commit with GPG signing · 2025-02-13 08:53:47 +01:00
Ruben Talstra · e9e2917042 · fix: fixed missing keys for the button · 2025-02-12 22:21:33 +01:00
Ruben Talstra · a0c4ddaf9e · fix: disallow literal string · 2025-02-12 22:16:05 +01:00
Ruben Talstra · 05a4f6cc45 · fix: json format · 2025-02-12 21:54:37 +01:00
Ruben Talstra · 3a60fa1966 · Merge branch 'main' into feat/webauthn · 2025-02-12 21:51:09 +01:00
Ruben Talstra · 8ea085ee25 · fix: working code + updated my package passport-simple-webauthn2 · 2025-02-12 21:49:39 +01:00
Ruben Talstra · 1e1b865f4f · refactor: PasskeyAuth.tsx · 2025-02-12 21:13:30 +01:00
Ruben Talstra · 1ab5bc425d · fix: eslint issues. · 2025-02-12 20:48:00 +01:00
Ruben Talstra · 091d4f3192 · fix: added the missing code from the merge conflict. · 2025-02-12 20:42:49 +01:00
Ruben Talstra · 1cb1c9196d · WIP 🔐 feat: PassKey (#5606) · 2025-02-12 20:40:29 +01:00
  * added PassKey authentication.
  * fixed issue with test :)
  * Delete client/src/components/Auth/AuthLayout.tsx
  * fix: conflicted issue
506 changed files with 13175 additions and 33008 deletions

View File

@@ -88,7 +88,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_MODELS=claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -142,7 +142,7 @@ GOOGLE_KEY=user_provided
# GOOGLE_AUTH_HEADER=true
# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-2.5-pro-exp-03-25,gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
# GOOGLE_MODELS=gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
# Vertex AI
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
@@ -175,7 +175,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -209,6 +209,12 @@ ASSISTANTS_API_KEY=user_provided
# More info, including how to enable use of Assistants with Azure here:
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
#============#
# OpenRouter #
#============#
# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint
# OPENROUTER_API_KEY=
#============#
# Plugins #
#============#
@@ -248,13 +254,6 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
# Flux
#-----------------
FLUX_API_BASE_URL=https://api.us1.bfl.ai
# FLUX_API_BASE_URL = 'https://api.bfl.ml';
# Get your API key at https://api.us1.bfl.ai/auth/profile
# FLUX_API_KEY=
# Google
#-----------------
@@ -364,7 +363,7 @@ ILLEGAL_MODEL_REQ_SCORE=5
# Balance #
#========================#
# CHECK_BALANCE=false
CHECK_BALANCE=false
# START_BALANCE=20000 # note: the number of tokens that will be credited after registration.
#========================#
@@ -415,6 +414,10 @@ APPLE_KEY_ID=
APPLE_PRIVATE_KEY_PATH=
APPLE_CALLBACK_URL=/oauth/apple/callback
# PassKeys
PASSKEY_ENABLED=true
RP_ID=localhost
# OpenID
OPENID_CLIENT_ID=
OPENID_CLIENT_SECRET=
@@ -432,19 +435,15 @@ OPENID_NAME_CLAIM=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
# Set to true to automatically redirect to the OpenID provider when a user visits the login page
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
OPENID_AUTO_REDIRECT=false
# LDAP
LDAP_URL=
LDAP_BIND_DN=
LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
#LDAP_SEARCH_FILTER="mail="
LDAP_SEARCH_FILTER=mail={{username}}
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
# LDAP_STARTTLS=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
@@ -477,24 +476,6 @@ FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
#========================#
# S3 AWS Bucket #
#========================#
AWS_ENDPOINT_URL=
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
AWS_REGION=
AWS_BUCKET_NAME=
#========================#
# Azure Blob Storage #
#========================#
AZURE_STORAGE_CONNECTION_STRING=
AZURE_STORAGE_PUBLIC_ACCESS=false
AZURE_CONTAINER_NAME=files
#========================#
# Shared Links #
#========================#
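The PassKeys block added above introduces PASSKEY_ENABLED and RP_ID. RP_ID is the WebAuthn Relying Party ID and must match the domain the client is served from, hence localhost for local development. As a minimal browser-side sketch of where that value surfaces, using the standard navigator.credentials API (this is not LibreChat's actual client code; the challenge and user fields are placeholders a server would normally supply):

// Minimal WebAuthn registration sketch: rp.id must equal the RP_ID the
// server is configured with, or the ceremony fails. All values are placeholders.
const credential = await navigator.credentials.create({
  publicKey: {
    challenge: new Uint8Array(32), // normally a server-issued random challenge
    rp: { id: 'localhost', name: 'LibreChat' },
    user: {
      id: new TextEncoder().encode('user-id'),
      name: 'user@example.com',
      displayName: 'User',
    },
    pubKeyCredParams: [{ type: 'public-key', alg: -7 }], // ES256
  },
});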

View File

@@ -39,9 +39,6 @@ jobs:
- name: Install MCP Package
run: npm run build:mcp
- name: Install Data Schemas Package
run: npm run build:data-schemas
- name: Create empty auth.json file
run: |
mkdir -p api/data
@@ -64,7 +61,4 @@ jobs:
run: cd api && npm run test:ci
- name: Run librechat-data-provider unit tests
run: cd packages/data-provider && npm run test:ci
- name: Run librechat-mcp unit tests
run: cd packages/mcp && npm run test:ci
run: cd packages/data-provider && npm run test:ci

View File

@@ -1,58 +0,0 @@
name: Publish `@librechat/data-schemas` to NPM
on:
push:
branches:
- main
paths:
- 'packages/data-schemas/package.json'
workflow_dispatch:
inputs:
reason:
description: 'Reason for manual trigger'
required: false
default: 'Manual publish requested'
jobs:
build-and-publish:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '18.x'
- name: Install dependencies
run: cd packages/data-schemas && npm ci
- name: Build
run: cd packages/data-schemas && npm run build
- name: Set up npm authentication
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
- name: Check version change
id: check
working-directory: packages/data-schemas
run: |
PACKAGE_VERSION=$(node -p "require('./package.json').version")
PUBLISHED_VERSION=$(npm view @librechat/data-schemas version 2>/dev/null || echo "0.0.0")
if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
echo "No version change, skipping publish"
echo "skip=true" >> $GITHUB_OUTPUT
else
echo "Version changed, proceeding with publish"
echo "skip=false" >> $GITHUB_OUTPUT
fi
- name: Pack package
if: steps.check.outputs.skip != 'true'
working-directory: packages/data-schemas
run: npm pack
- name: Publish
if: steps.check.outputs.skip != 'true'
working-directory: packages/data-schemas
run: npm publish *.tgz --access public
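For reference, the version gate in the deleted workflow above can be reproduced in plain Node. A rough sketch, assuming npm is on PATH (not code from the repository):

// Sketch of the workflow's publish gate: skip publishing when the version
// in package.json matches what is already on the npm registry.
const { execSync } = require('child_process');

const local = require('./package.json').version;
let published = '0.0.0'; // same fallback the workflow uses for unpublished packages
try {
  published = execSync('npm view @librechat/data-schemas version', { encoding: 'utf8' }).trim();
} catch {
  // npm view exits non-zero when the package has never been published
}
console.log(local === published ? 'skip=true' : 'skip=false');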

View File

@@ -84,11 +84,11 @@ jobs:
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
commit-message: "chore: update CHANGELOG for release ${{ github.ref_name }}"
commit-message: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
base: main
branch: "changelog/${{ github.ref_name }}"
branch: "changelog/${GITHUB_REF##*/}"
reviewers: danny-avila
title: "chore: update CHANGELOG for release ${{ github.ref_name }}"
title: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
body: |
**Description**:
- This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases.
- This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${GITHUB_REF##*/} above previous releases.

View File

@@ -1,12 +1,6 @@
name: Detect Unused NPM Packages
on:
pull_request:
paths:
- 'package.json'
- 'package-lock.json'
- 'client/**'
- 'api/**'
on: [pull_request]
jobs:
detect-unused-packages:

.gitignore (vendored) · 9 changes
View File

@@ -37,10 +37,6 @@ client/public/main.js
client/public/main.js.map
client/public/main.js.LICENSE.txt
# Azure Blob Storage Emulator (Azurite)
__azurite**
__blobstorage__/**/*
# Dependency directories
# Deployed apps should consider commenting these lines out:
# see https://npmjs.org/doc/faq.html#Should-I-check-my-node_modules-folder-into-git
@@ -104,10 +100,13 @@ auth.json
/images
!client/src/components/Nav/SettingsTabs/Data/
!/client/src/@types/i18next.d.ts
# User uploads
uploads/
# owner
release/
!/client/src/@types/i18next.d.ts
# Apple Private Key
*.p8

View File

@@ -1,16 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
## [Unreleased]
### ✨ New Features
- 🪄 feat: Agent Artifacts by **@danny-avila** in [#5804](https://github.com/danny-avila/LibreChat/pull/5804)
### ⚙️ Other Changes
- 🔄 chore: Enforce 18next Language Keys by **@rubentalstra** in [#5803](https://github.com/danny-avila/LibreChat/pull/5803)
- 🔃 refactor: Parent Message ID Handling on Error, Update Translations, Bump Agents by **@danny-avila** in [#5833](https://github.com/danny-avila/LibreChat/pull/5833)
---

View File

@@ -1,4 +1,4 @@
# v0.7.7
# v0.7.7-rc1
# Base node image
FROM node:20-alpine AS node

View File

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.7.7
# v0.7.7-rc1
# Base for all builds
FROM node:20-alpine AS base-min
@@ -11,7 +11,6 @@ RUN npm config set fetch-retry-maxtimeout 600000 && \
COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/mcp/package*.json ./packages/mcp/
COPY packages/data-schemas/package*.json ./packages/data-schemas/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
@@ -33,13 +32,6 @@ COPY packages/mcp ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
# Build data-schemas
FROM base AS data-schemas-build
WORKDIR /app/packages/data-schemas
COPY packages/data-schemas ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
# Client build
FROM base AS client-build
WORKDIR /app/client
@@ -57,9 +49,8 @@ COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist
COPY --from=data-schemas-build /app/packages/data-schemas/dist ./packages/data-schemas/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
EXPOSE 3080
ENV HOST=0.0.0.0
CMD ["node", "server/index.js"]
CMD ["node", "server/index.js"]

View File

@@ -81,7 +81,7 @@
- [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control
- 💬 **Multimodal & File Interactions**:
- Upload and analyze images with Claude 3, GPT-4.5, GPT-4o, o1, Llama-Vision, and Gemini 📸
- Upload and analyze images with Claude 3, GPT-4o, o1, Llama-Vision, and Gemini 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️
- 🌎 **Multilingual UI**:
@@ -197,6 +197,6 @@ We thank [Locize](https://locize.com) for their translation management tools tha
<p align="center">
<a href="https://locize.com" target="_blank" rel="noopener noreferrer">
<img src="https://github.com/user-attachments/assets/d6b70894-6064-475e-bb65-92a9e23e0077" alt="Locize Logo" height="50">
<img src="https://locize.com/img/locize_color.svg" alt="Locize Logo" height="50">
</a>
</p>

View File

@@ -2,13 +2,12 @@ const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
Constants,
ErrorTypes,
EModelEndpoint,
anthropicSettings,
getResponseSender,
validateVisionModel,
} = require('librechat-data-provider');
const { SplitStreamHandler: _Handler, GraphEvents } = require('@librechat/agents');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const {
truncateText,
formatMessage,
@@ -17,31 +16,16 @@ const {
parseParamFromPrompt,
createContextHandlers,
} = require('./prompts');
const {
getClaudeHeaders,
configureReasoning,
checkPromptCacheSupport,
} = require('~/server/services/Endpoints/anthropic/helpers');
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const Tokenizer = require('~/server/services/Tokenizer');
const { logger, sendEvent } = require('~/config');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
class SplitStreamHandler extends _Handler {
getDeltaContent(chunk) {
return (chunk?.delta?.text ?? chunk?.completion) || '';
}
getReasoningDelta(chunk) {
return chunk?.delta?.thinking || '';
}
}
/** Helper function to introduce a delay before retrying */
function delayBeforeRetry(attempts, baseDelay = 1000) {
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
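delayBeforeRetry is a simple linear backoff: attempt n waits n * baseDelay milliseconds. A quick usage sketch (the 350 ms base matches the retry loop further down in this file):

// inside an async function:
await delayBeforeRetry(1, 350); // waits ~350 ms
await delayBeforeRetry(2, 350); // waits ~700 ms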
@@ -84,8 +68,6 @@ class AnthropicClient extends BaseClient {
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
/** @type {SplitStreamHandler | undefined} */
this.streamHandler;
}
setOptions(options) {
@@ -115,10 +97,9 @@ class AnthropicClient extends BaseClient {
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaude3 = modelMatch.includes('claude-3');
this.isLegacyOutput = !(
/claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
);
this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
this.isLegacyOutput = !modelMatch.includes('claude-3-5-sonnet');
this.supportsCacheControl =
this.options.promptCache && this.checkPromptCacheSupport(modelMatch);
if (
this.isLegacyOutput &&
@@ -144,21 +125,16 @@ class AnthropicClient extends BaseClient {
this.options.endpointType ?? this.options.endpoint,
this.options.endpointTokenConfig,
) ??
anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
1500;
this.maxPromptTokens =
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
const reservedTokens = this.maxPromptTokens + this.maxResponseTokens;
if (reservedTokens > this.maxContextTokens) {
const info = `Total Possible Tokens + Max Output Tokens must be less than or equal to Max Context Tokens: ${this.maxPromptTokens} (total possible output) + ${this.maxResponseTokens} (max output) = ${reservedTokens}/${this.maxContextTokens} (max context)`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(info);
throw new Error(errorMessage);
} else if (this.maxResponseTokens === this.maxContextTokens) {
const info = `Max Output Tokens must be less than Max Context Tokens: ${this.maxResponseTokens} (max output) = ${this.maxContextTokens} (max context)`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(info);
throw new Error(errorMessage);
if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
throw new Error(
`maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
this.maxPromptTokens + this.maxResponseTokens
}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
);
}
this.sender =
@@ -195,9 +171,18 @@ class AnthropicClient extends BaseClient {
options.baseURL = this.options.reverseProxyUrl;
}
const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
if (headers) {
options.defaultHeaders = headers;
if (
this.supportsCacheControl &&
requestOptions?.model &&
requestOptions.model.includes('claude-3-5-sonnet')
) {
options.defaultHeaders = {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
};
} else if (this.supportsCacheControl) {
options.defaultHeaders = {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
}
return new Anthropic(options);
@@ -683,48 +668,29 @@ class AnthropicClient extends BaseClient {
* @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
*/
async createResponse(client, options, useMessages) {
return (useMessages ?? this.useMessages)
return useMessages ?? this.useMessages
? await client.messages.create(options)
: await client.completions.create(options);
}
getMessageMapMethod() {
/**
* @param {TMessage} msg
*/
return (msg) => {
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
} else if (msg.content != null) {
/** @type {import('@librechat/agents').MessageContentComplex} */
const newContent = [];
for (let part of msg.content) {
if (part.think != null) {
continue;
}
newContent.push(part);
}
msg.content = newContent;
}
return msg;
};
}
/**
* @param {string[]} [intermediateReply]
* @returns {string}
* @param {string} modelName
* @returns {boolean}
*/
getStreamText(intermediateReply) {
if (!this.streamHandler) {
return intermediateReply?.join('') ?? '';
checkPromptCacheSupport(modelName) {
const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic);
if (modelMatch.includes('claude-3-5-sonnet-latest')) {
return false;
}
const reasoningText = this.streamHandler.reasoningTokens.join('');
const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
if (
modelMatch === 'claude-3-5-sonnet' ||
modelMatch === 'claude-3-5-haiku' ||
modelMatch === 'claude-3-haiku' ||
modelMatch === 'claude-3-opus'
) {
return true;
}
return false;
}
async sendCompletion(payload, { onProgress, abortController }) {
@@ -744,6 +710,7 @@ class AnthropicClient extends BaseClient {
user_id: this.user,
};
let text = '';
const {
stream,
model,
@@ -754,34 +721,22 @@ class AnthropicClient extends BaseClient {
topK: top_k,
} = this.modelOptions;
let requestOptions = {
const requestOptions = {
model,
stream: stream || true,
stop_sequences,
temperature,
metadata,
top_p,
top_k,
};
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens =
maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
} else {
requestOptions.prompt = payload;
requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
}
requestOptions = configureReasoning(requestOptions, {
thinking: this.options.thinking,
thinkingBudget: this.options.thinkingBudget,
});
if (!/claude-3[-.]7/.test(model)) {
requestOptions.top_p = top_p;
requestOptions.top_k = top_k;
} else if (requestOptions.thinking == null) {
requestOptions.topP = top_p;
requestOptions.topK = top_k;
requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
}
if (this.systemMessage && this.supportsCacheControl === true) {
@@ -801,17 +756,13 @@ class AnthropicClient extends BaseClient {
}
logger.debug('[AnthropicClient]', { ...requestOptions });
this.streamHandler = new SplitStreamHandler({
accumulate: true,
runId: this.responseMessageId,
handlers: {
[GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event),
},
});
let intermediateReply = this.streamHandler.tokens;
const handleChunk = (currentChunk) => {
if (currentChunk) {
text += currentChunk;
onProgress(currentChunk);
}
};
const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
@@ -832,15 +783,22 @@ class AnthropicClient extends BaseClient {
});
for await (const completion of response) {
// Handle each completion as before
const type = completion?.type ?? '';
if (tokenEventTypes.has(type)) {
logger.debug(`[AnthropicClient] ${type}`, completion);
this[type] = completion;
}
this.streamHandler.handle(completion);
if (completion?.delta?.text) {
handleChunk(completion.delta.text);
} else if (completion.completion) {
handleChunk(completion.completion);
}
await sleep(streamRate);
}
// Successful processing, exit loop
break;
} catch (error) {
attempts += 1;
@@ -850,10 +808,6 @@ class AnthropicClient extends BaseClient {
if (attempts < maxRetries) {
await delayBeforeRetry(attempts, 350);
} else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
return this.getStreamText();
} else if (intermediateReply.length > 0) {
return this.getStreamText(intermediateReply);
} else {
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
}
@@ -869,7 +823,8 @@ class AnthropicClient extends BaseClient {
}
await processResponse.bind(this)();
return this.getStreamText(intermediateReply);
return text.trim();
}
getSaveOptions() {
@@ -879,8 +834,6 @@ class AnthropicClient extends BaseClient {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
promptCache: this.options.promptCache,
thinking: this.options.thinking,
thinkingBudget: this.options.thinkingBudget,
resendFiles: this.options.resendFiles,
iconURL: this.options.iconURL,
greeting: this.options.greeting,

View File

@@ -5,15 +5,13 @@ const {
isAgentsEndpoint,
isParamEndpoint,
EModelEndpoint,
ContentTypes,
excludedKeys,
ErrorTypes,
Constants,
} = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const { checkBalance } = require('~/models/balanceMethods');
const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const { truncateToolCallOutputs } = require('./prompts');
const { addSpaceIfNeeded } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -57,10 +55,6 @@ class BaseClient {
* Flag to determine if the client re-submitted the latest assistant message.
* @type {boolean | undefined} */
this.continued;
/**
* Flag to determine if the client has already fetched the conversation while saving new messages.
* @type {boolean | undefined} */
this.fetchedConvo;
/** @type {TMessage[]} */
this.currentMessages = [];
/** @type {import('librechat-data-provider').VisionModes | undefined} */
@@ -366,14 +360,17 @@ class BaseClient {
* context: TMessage[],
* remainingContextTokens: number,
* messagesToRefine: TMessage[],
* }>} An object with three properties: `context`, `remainingContextTokens`, and `messagesToRefine`.
* summaryIndex: number,
* }>} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`.
* `context` is an array of messages that fit within the token limit.
* `summaryIndex` is the index of the first message in the `messagesToRefine` array.
* `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context.
* `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit.
*/
async getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, instructions }) {
// Every reply is primed with <|start|>assistant<|message|>, so we
// start with 3 tokens for the label after all messages have been counted.
let summaryIndex = -1;
let currentTokenCount = 3;
const instructionsTokenCount = instructions?.tokenCount ?? 0;
let remainingContextTokens =
@@ -406,12 +403,14 @@ class BaseClient {
}
const prunedMemory = messages;
summaryIndex = prunedMemory.length - 1;
remainingContextTokens -= currentTokenCount;
return {
context: context.reverse(),
remainingContextTokens,
messagesToRefine: prunedMemory,
summaryIndex,
};
}
@@ -454,7 +453,7 @@ class BaseClient {
let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);
let { context, remainingContextTokens, messagesToRefine } =
let { context, remainingContextTokens, messagesToRefine, summaryIndex } =
await this.getMessagesWithinTokenLimit({
messages: orderedWithInstructions,
instructions,
@@ -524,7 +523,7 @@ class BaseClient {
}
// Make sure to only continue summarization logic if the summary message was generated
shouldSummarize = summaryMessage != null && shouldSummarize === true;
shouldSummarize = summaryMessage && shouldSummarize;
logger.debug('[BaseClient] Context Count (2/2)', {
remainingContextTokens,
@@ -534,18 +533,17 @@ class BaseClient {
/** @type {Record<string, number> | undefined} */
let tokenCountMap;
if (buildTokenMap) {
const currentPayload = shouldSummarize ? orderedWithInstructions : context;
tokenCountMap = currentPayload.reduce((map, message, index) => {
tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
const { messageId } = message;
if (!messageId) {
return map;
}
if (shouldSummarize && index === messagesToRefine.length - 1 && !usePrevSummary) {
if (shouldSummarize && index === summaryIndex && !usePrevSummary) {
map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount };
}
map[messageId] = currentPayload[index].tokenCount;
map[messageId] = orderedWithInstructions[index].tokenCount;
return map;
}, {});
}
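Whichever payload the reduce above iterates, the result maps message IDs to token counts, with an optional summaryMessage entry spliced in at the summary index. A sketch of the resulting shape, with illustrative values:

// Illustrative tokenCountMap produced by the reduce above (sketch).
const tokenCountMap = {
  'message-id-1': 42,
  'message-id-2': 87,
  // present only when a summary message was generated for this request:
  summaryMessage: { messageId: 'message-id-1', tokenCount: 30 /* ...other summary fields */ },
};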
@@ -634,9 +632,8 @@ class BaseClient {
}
}
const balance = this.options.req?.app?.locals?.balance;
if (
balance?.enabled &&
isEnabled(process.env.CHECK_BALANCE) &&
supportsBalanceCheck[this.options.endpointType ?? this.options.endpoint]
) {
await checkBalance({
@@ -866,40 +863,16 @@ class BaseClient {
return { message: savedMessage };
}
const fieldsToKeep = {
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
};
const existingConvo =
this.fetchedConvo === true
? null
: await getConvo(this.options.req?.user?.id, message.conversationId);
const unsetFields = {};
const exceptions = new Set(['spec', 'iconURL']);
if (existingConvo != null) {
this.fetchedConvo = true;
for (const key in existingConvo) {
if (!key) {
continue;
}
if (excludedKeys.has(key) && !exceptions.has(key)) {
continue;
}
if (endpointOptions?.[key] === undefined) {
unsetFields[key] = 1;
}
}
}
const conversation = await saveConvo(this.options.req, fieldsToKeep, {
context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
unsetFields,
});
const conversation = await saveConvo(
this.options.req,
{
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
);
return { message: savedMessage, conversation };
}
@@ -1020,17 +993,11 @@ class BaseClient {
const processValue = (value) => {
if (Array.isArray(value)) {
for (let item of value) {
if (
!item ||
!item.type ||
item.type === ContentTypes.THINK ||
item.type === ContentTypes.ERROR ||
item.type === ContentTypes.IMAGE_URL
) {
if (!item || !item.type || item.type === 'image_url') {
continue;
}
if (item.type === ContentTypes.TOOL_CALL && item.tool_call != null) {
if (item.type === 'tool_call' && item.tool_call != null) {
const toolName = item.tool_call?.name || '';
if (toolName != null && toolName && typeof toolName === 'string') {
numTokens += this.getTokenCount(toolName);
@@ -1126,13 +1093,9 @@ class BaseClient {
return message;
}
const files = await getFiles(
{
file_id: { $in: fileIds },
},
{},
{},
);
const files = await getFiles({
file_id: { $in: fileIds },
});
await this.addImageURLs(message, files, this.visionMode);

View File

@@ -198,11 +198,7 @@ class GoogleClient extends BaseClient {
*/
checkVisionRequest(attachments) {
/* Validate vision request */
this.defaultVisionModel =
this.options.visionModel ??
(!EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)
? this.modelOptions.model
: 'gemini-pro-vision');
this.defaultVisionModel = this.options.visionModel ?? 'gemini-pro-vision';
const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
@@ -831,8 +827,7 @@ class GoogleClient extends BaseClient {
let reply = '';
const { abortController } = options;
const model =
this.options.titleModel ?? this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const model = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const safetySettings = getSafetySettings(model);
if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
logger.debug('Identified titling model as GenAI version');

View File

@@ -5,7 +5,6 @@ const { SplitStreamHandler, GraphEvents } = require('@librechat/agents');
const {
Constants,
ImageDetail,
ContentTypes,
EModelEndpoint,
resolveHeaders,
KnownEndpoints,
@@ -110,15 +109,15 @@ class OpenAIClient extends BaseClient {
const omniPattern = /\b(o1|o3)\b/i;
this.isOmni = omniPattern.test(this.modelOptions.model);
const { OPENAI_FORCE_PROMPT } = process.env ?? {};
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
this.apiKey = OPENROUTER_API_KEY;
this.useOpenRouter = true;
}
const { reverseProxyUrl: reverseProxy } = this.options;
if (
!this.useOpenRouter &&
((reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) ||
(this.options.endpoint &&
this.options.endpoint.toLowerCase().includes(KnownEndpoints.openrouter)))
) {
if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
this.useOpenRouter = true;
}
@@ -226,6 +225,10 @@ class OpenAIClient extends BaseClient {
logger.debug('Using Azure endpoint');
}
if (this.useOpenRouter) {
this.completionsUrl = 'https://openrouter.ai/api/v1/chat/completions';
}
return this;
}
@@ -300,9 +303,7 @@ class OpenAIClient extends BaseClient {
}
getEncoding() {
return this.modelOptions?.model && /gpt-4[^-\s]/.test(this.modelOptions.model)
? 'o200k_base'
: 'cl100k_base';
return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
}
/**
@@ -502,24 +503,8 @@ class OpenAIClient extends BaseClient {
if (promptPrefix && this.isOmni === true) {
const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user');
if (lastUserMessageIndex !== -1) {
if (Array.isArray(payload[lastUserMessageIndex].content)) {
const firstTextPartIndex = payload[lastUserMessageIndex].content.findIndex(
(part) => part.type === ContentTypes.TEXT,
);
if (firstTextPartIndex !== -1) {
const firstTextPart = payload[lastUserMessageIndex].content[firstTextPartIndex];
payload[lastUserMessageIndex].content[firstTextPartIndex].text =
`${promptPrefix}\n${firstTextPart.text}`;
} else {
payload[lastUserMessageIndex].content.unshift({
type: ContentTypes.TEXT,
text: promptPrefix,
});
}
} else {
payload[lastUserMessageIndex].content =
`${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
}
payload[lastUserMessageIndex].content =
`${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
}
}
@@ -625,7 +610,7 @@ class OpenAIClient extends BaseClient {
}
initializeLLM({
model = openAISettings.model.default,
model = 'gpt-4o-mini',
modelName,
temperature = 0.2,
max_tokens,
@@ -726,7 +711,7 @@ class OpenAIClient extends BaseClient {
const { OPENAI_TITLE_MODEL } = process.env ?? {};
let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? openAISettings.model.default;
let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-4o-mini';
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
}
@@ -919,7 +904,7 @@ ${convo}
let prompt;
// TODO: remove the gpt fallback and make it specific to endpoint
const { OPENAI_SUMMARY_MODEL = openAISettings.model.default } = process.env ?? {};
const { OPENAI_SUMMARY_MODEL = 'gpt-4o-mini' } = process.env ?? {};
let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
@@ -1120,16 +1105,6 @@ ${convo}
return (msg) => {
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
} else if (msg.content != null) {
/** @type {import('@librechat/agents').MessageContentComplex} */
const newContent = [];
for (let part of msg.content) {
if (part.think != null) {
continue;
}
newContent.push(part);
}
msg.content = newContent;
}
return msg;
@@ -1181,6 +1156,10 @@ ${convo}
opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
}
if (this.isVisionModel) {
modelOptions.max_tokens = 4000;
}
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
@@ -1291,29 +1270,6 @@ ${convo}
});
}
/** Note: OpenAI Web Search models do not support any known parameters besides `max_tokens` */
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
this.options.dropParams = this.options.dropParams || [];
this.options.dropParams = [
...new Set([...this.options.dropParams, ...searchExcludeParams]),
];
}
if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
this.options.dropParams.forEach((param) => {
delete modelOptions[param];
@@ -1342,11 +1298,15 @@ ${convo}
let streamResolve;
if (
(!this.isOmni || /^o1-(mini|preview)/i.test(modelOptions.model)) &&
modelOptions.reasoning_effort != null
this.isOmni === true &&
(this.azure || /o1(?!-(?:mini|preview)).*$/.test(modelOptions.model)) &&
!/o3-.*$/.test(this.modelOptions.model) &&
modelOptions.stream
) {
delete modelOptions.stream;
delete modelOptions.stop;
} else if (!this.isOmni && modelOptions.reasoning_effort != null) {
delete modelOptions.reasoning_effort;
delete modelOptions.temperature;
}
let reasoningKey = 'reasoning_content';
@@ -1354,12 +1314,6 @@ ${convo}
modelOptions.include_reasoning = true;
reasoningKey = 'reasoning';
}
if (this.useOpenRouter && modelOptions.reasoning_effort != null) {
modelOptions.reasoning = {
effort: modelOptions.reasoning_effort,
};
delete modelOptions.reasoning_effort;
}
this.streamHandler = new SplitStreamHandler({
reasoningKey,

View File

@@ -5,8 +5,9 @@ const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_pars
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
const { checkBalance } = require('~/models/balanceMethods');
const { formatLangChainMessages } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
const { isEnabled } = require('~/server/utils');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { logger } = require('~/config');
@@ -335,8 +336,7 @@ class PluginsClient extends OpenAIClient {
}
}
const balance = this.options.req?.app?.locals?.balance;
if (balance?.enabled) {
if (isEnabled(process.env.CHECK_BALANCE)) {
await checkBalance({
req: this.options.req,
res: this.options.res,

View File

@@ -1,8 +1,8 @@
const { promptTokensEstimate } = require('openai-chat-tokens');
const { EModelEndpoint, supportsBalanceCheck } = require('librechat-data-provider');
const { formatFromLangChain } = require('~/app/clients/prompts');
const { getBalanceConfig } = require('~/server/services/Config');
const { checkBalance } = require('~/models/balanceMethods');
const checkBalance = require('~/models/checkBalance');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const createStartHandler = ({
@@ -49,8 +49,8 @@ const createStartHandler = ({
prelimPromptTokens += tokenBuffer;
try {
const balance = await getBalanceConfig();
if (balance?.enabled && supportsBalanceCheck[EModelEndpoint.openAI]) {
// TODO: if plugins extends to non-OpenAI models, this will need to be updated
if (isEnabled(process.env.CHECK_BALANCE) && supportsBalanceCheck[EModelEndpoint.openAI]) {
const generations =
initialMessageCount && messages.length > initialMessageCount
? messages.slice(initialMessageCount)

View File

@@ -1,7 +1,7 @@
/**
* Anthropic API: Adds cache control to the appropriate user messages in the payload.
* @param {Array<AnthropicMessage | BaseMessage>} messages - The array of message objects.
* @returns {Array<AnthropicMessage | BaseMessage>} - The updated array of message objects with cache control added.
* @param {Array<AnthropicMessage>} messages - The array of message objects.
* @returns {Array<AnthropicMessage>} - The updated array of message objects with cache control added.
*/
function addCacheControl(messages) {
if (!Array.isArray(messages) || messages.length < 2) {
@@ -13,9 +13,7 @@ function addCacheControl(messages) {
for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) {
const message = updatedMessages[i];
if (message.getType != null && message.getType() !== 'human') {
continue;
} else if (message.getType == null && message.role !== 'user') {
if (message.role !== 'user') {
continue;
}
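For context, addCacheControl walks the payload backwards and marks up to the last two user messages for Anthropic prompt caching. The annotation it attaches looks roughly like the sketch below; the ephemeral cache type is the one Anthropic's prompt-caching beta defines (illustrative shape, not the function's verbatim output):

// Shape of a user message after cache control is applied (sketch).
const cached = {
  role: 'user',
  content: [
    {
      type: 'text',
      text: 'Long, reusable context goes here...',
      cache_control: { type: 'ephemeral' },
    },
  ],
};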

View File

@@ -325,37 +325,4 @@ describe('formatAgentMessages', () => {
);
expect(result[0].content).not.toContain('Analyzing the problem...');
});
it('should exclude ERROR type content parts', () => {
const payload = [
{
role: 'assistant',
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' },
{
type: ContentTypes.ERROR,
[ContentTypes.ERROR]:
'An error occurred while processing the request: Something went wrong',
},
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
],
},
];
const result = formatAgentMessages(payload);
expect(result).toHaveLength(1);
expect(result[0]).toBeInstanceOf(AIMessage);
expect(result[0].content).toEqual([
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
]);
// Make sure no error content exists in the result
const hasErrorContent = result[0].content.some(
(item) =>
item.type === ContentTypes.ERROR || JSON.stringify(item).includes('An error occurred'),
);
expect(hasErrorContent).toBe(false);
});
});

View File

@@ -211,8 +211,6 @@ const formatAgentMessages = (payload) => {
} else if (part.type === ContentTypes.THINK) {
hasReasoning = true;
continue;
} else if (part.type === ContentTypes.ERROR || part.type === ContentTypes.AGENT_UPDATE) {
continue;
} else {
currentContent.push(part);
}

View File

@@ -1,4 +1,3 @@
const { SplitStreamHandler } = require('@librechat/agents');
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');
@@ -406,327 +405,4 @@ describe('AnthropicClient', () => {
expect(Number.isNaN(result)).toBe(false);
});
});
describe('maxOutputTokens handling for different models', () => {
it('should not cap maxOutputTokens for Claude 3.5 Sonnet models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10;
client.setOptions({
modelOptions: {
model: 'claude-3-5-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.5-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should not cap maxOutputTokens for Claude 3.7 models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
client.setOptions({
modelOptions: {
model: 'claude-3-7-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
client.setOptions({
modelOptions: {
model: 'claude-3-5-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.5-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should cap maxOutputTokens for Claude 3 Haiku and Opus models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
// Test haiku
client.setOptions({
modelOptions: {
model: 'claude-3-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
// Test opus
client.setOptions({
modelOptions: {
model: 'claude-3-opus',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
});
describe('topK/topP parameters for different models', () => {
beforeEach(() => {
// Mock the SplitStreamHandler
jest.spyOn(SplitStreamHandler.prototype, 'handle').mockImplementation(() => {});
});
afterEach(() => {
jest.restoreAllMocks();
});
it('should include top_k and top_p parameters for non-claude-3.7 models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-opus',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).toHaveProperty('top_k', 10);
expect(capturedOptions).toHaveProperty('top_p', 0.9);
});
it('should include top_k and top_p parameters for claude-3-5-sonnet models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-5-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).toHaveProperty('top_k', 10);
expect(capturedOptions).toHaveProperty('top_p', 0.9);
});
it('should not include top_k and top_p parameters for claude-3-7-sonnet models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).not.toHaveProperty('top_k');
expect(capturedOptions).not.toHaveProperty('top_p');
});
it('should not include top_k and top_p parameters for models with decimal notation (claude-3.7)', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).not.toHaveProperty('top_k');
expect(capturedOptions).not.toHaveProperty('top_p');
});
});
it('should include top_k and top_p parameters for Claude-3.7 models when thinking is explicitly disabled', async () => {
const client = new AnthropicClient('test-api-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
thinking: false,
});
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('topK', 10);
expect(capturedOptions).toHaveProperty('topP', 0.9);
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
thinking: false,
});
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('topK', 10);
expect(capturedOptions).toHaveProperty('topP', 0.9);
});
});

View File

@@ -30,8 +30,6 @@ jest.mock('~/models', () => ({
updateFileUsage: jest.fn(),
}));
const { getConvo, saveConvo } = require('~/models');
jest.mock('@langchain/openai', () => {
return {
ChatOpenAI: jest.fn().mockImplementation(() => {
@@ -164,7 +162,7 @@ describe('BaseClient', () => {
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
expect(result.context).toEqual(expectedContext);
expect(result.messagesToRefine.length - 1).toEqual(expectedIndex);
expect(result.summaryIndex).toEqual(expectedIndex);
expect(result.remainingContextTokens).toBe(expectedRemainingContextTokens);
expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
});
@@ -200,7 +198,7 @@ describe('BaseClient', () => {
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
expect(result.context).toEqual(expectedContext);
expect(result.messagesToRefine.length - 1).toEqual(expectedIndex);
expect(result.summaryIndex).toEqual(expectedIndex);
expect(result.remainingContextTokens).toBe(expectedRemainingContextTokens);
expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
});
@@ -542,11 +540,10 @@ describe('BaseClient', () => {
test('saveMessageToDatabase is called with the correct arguments', async () => {
const saveOptions = TestClient.getSaveOptions();
const user = {};
const user = {}; // Mock user
const opts = { user };
const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
await TestClient.sendMessage('Hello, world!', opts);
expect(saveSpy).toHaveBeenCalledWith(
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledWith(
expect.objectContaining({
sender: expect.any(String),
text: expect.any(String),
@@ -560,157 +557,6 @@ describe('BaseClient', () => {
);
});
test('should handle existing conversation when getConvo retrieves one', async () => {
const existingConvo = {
conversationId: 'existing-convo-id',
endpoint: 'openai',
endpointType: 'openai',
model: 'gpt-3.5-turbo',
messages: [
{ role: 'user', content: 'Existing message 1' },
{ role: 'assistant', content: 'Existing response 1' },
],
temperature: 1,
};
const { temperature: _temp, ...newConvo } = existingConvo;
const user = {
id: 'user-id',
};
getConvo.mockResolvedValue(existingConvo);
saveConvo.mockResolvedValue(newConvo);
TestClient = initializeFakeClient(
apiKey,
{
...options,
req: {
user,
},
},
[],
);
const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
const newMessage = 'New message in existing conversation';
const response = await TestClient.sendMessage(newMessage, {
user,
conversationId: existingConvo.conversationId,
});
expect(getConvo).toHaveBeenCalledWith(user.id, existingConvo.conversationId);
expect(TestClient.conversationId).toBe(existingConvo.conversationId);
expect(response.conversationId).toBe(existingConvo.conversationId);
expect(TestClient.fetchedConvo).toBe(true);
expect(saveSpy).toHaveBeenCalledWith(
expect.objectContaining({
conversationId: existingConvo.conversationId,
text: newMessage,
}),
expect.any(Object),
expect.any(Object),
);
expect(saveConvo).toHaveBeenCalledTimes(2);
expect(saveConvo).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({
conversationId: existingConvo.conversationId,
}),
expect.objectContaining({
context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
unsetFields: {
temperature: 1,
},
}),
);
await TestClient.sendMessage('Another message', {
conversationId: existingConvo.conversationId,
});
expect(getConvo).toHaveBeenCalledTimes(1);
});
test('should correctly handle existing conversation and unset fields appropriately', async () => {
const existingConvo = {
conversationId: 'existing-convo-id',
endpoint: 'openai',
endpointType: 'openai',
model: 'gpt-3.5-turbo',
messages: [
{ role: 'user', content: 'Existing message 1' },
{ role: 'assistant', content: 'Existing response 1' },
],
title: 'Existing Conversation',
someExistingField: 'existingValue',
anotherExistingField: 'anotherValue',
temperature: 0.7,
modelLabel: 'GPT-3.5',
};
getConvo.mockResolvedValue(existingConvo);
saveConvo.mockResolvedValue(existingConvo);
TestClient = initializeFakeClient(
apiKey,
{
...options,
modelOptions: {
model: 'gpt-4',
temperature: 0.5,
},
},
[],
);
const newMessage = 'New message in existing conversation';
await TestClient.sendMessage(newMessage, {
conversationId: existingConvo.conversationId,
});
expect(saveConvo).toHaveBeenCalledTimes(2);
const saveConvoCall = saveConvo.mock.calls[0];
const [, savedFields, saveOptions] = saveConvoCall;
// Instead of checking all excludedKeys, we'll just check specific fields
// that we know should be excluded
expect(savedFields).not.toHaveProperty('messages');
expect(savedFields).not.toHaveProperty('title');
// Only check that someExistingField is in unsetFields
expect(saveOptions.unsetFields).toHaveProperty('someExistingField', 1);
// Mock saveConvo to return the expected fields
saveConvo.mockImplementation((req, fields) => {
return Promise.resolve({
...fields,
endpoint: 'openai',
endpointType: 'openai',
model: 'gpt-4',
temperature: 0.5,
});
});
// Only check the conversationId since that's the only field we can be sure about
expect(savedFields).toHaveProperty('conversationId', 'existing-convo-id');
expect(TestClient.fetchedConvo).toBe(true);
await TestClient.sendMessage('Another message', {
conversationId: existingConvo.conversationId,
});
expect(getConvo).toHaveBeenCalledTimes(1);
const secondSaveConvoCall = saveConvo.mock.calls[1];
expect(secondSaveConvoCall[2]).toHaveProperty('unsetFields', {});
});
test('sendCompletion is called with the correct arguments', async () => {
const payload = {}; // Mock payload
TestClient.buildMessages.mockReturnValue({ prompt: payload, tokenCountMap: null });

View File

@@ -56,6 +56,7 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
let TestClient = new FakeClient(apiKey);
TestClient.options = options;
TestClient.abortController = { abort: jest.fn() };
TestClient.saveMessageToDatabase = jest.fn();
TestClient.loadHistory = jest
.fn()
.mockImplementation((conversationId, parentMessageId = null) => {
@@ -85,6 +86,7 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
return 'Mock response text';
});
// eslint-disable-next-line no-unused-vars
TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
return {
choices: [

View File

@@ -136,11 +136,10 @@ OpenAI.mockImplementation(() => ({
}));
describe('OpenAIClient', () => {
const mockSet = jest.fn();
const mockCache = { set: mockSet };
beforeEach(() => {
const mockCache = {
get: jest.fn().mockResolvedValue({}),
set: jest.fn(),
};
getLogStores.mockReturnValue(mockCache);
});
let client;
@@ -203,6 +202,14 @@ describe('OpenAIClient', () => {
expect(client.modelOptions.temperature).toBe(0.7);
});
it('should set apiKey and useOpenRouter if OPENROUTER_API_KEY is present', () => {
process.env.OPENROUTER_API_KEY = 'openrouter-key';
client.setOptions({});
expect(client.apiKey).toBe('openrouter-key');
expect(client.useOpenRouter).toBe(true);
delete process.env.OPENROUTER_API_KEY; // Cleanup
});
it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
process.env.OPENAI_FORCE_PROMPT = 'true';
client.setOptions({});
@@ -527,6 +534,7 @@ describe('OpenAIClient', () => {
afterEach(() => {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
delete process.env.OPENROUTER_API_KEY;
});
it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {

View File

@@ -2,10 +2,9 @@ const availableTools = require('./manifest.json');
// Structured Tools
const DALLE3 = require('./structured/DALLE3');
const FluxAPI = require('./structured/FluxAPI');
const OpenWeather = require('./structured/OpenWeather');
const StructuredWolfram = require('./structured/Wolfram');
const createYouTubeTools = require('./structured/YouTube');
const StructuredWolfram = require('./structured/Wolfram');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
const GoogleSearchAPI = require('./structured/GoogleSearch');
@@ -31,7 +30,6 @@ module.exports = {
manifestToolMap,
// Structured Tools
DALLE3,
FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,

View File

@@ -164,19 +164,5 @@
"description": "Sign up at <a href=\"https://home.openweathermap.org/users/sign_up\" target=\"_blank\">OpenWeather</a>, then get your key at <a href=\"https://home.openweathermap.org/api_keys\" target=\"_blank\">API keys</a>."
}
]
},
{
"name": "Flux",
"pluginKey": "flux",
"description": "Generate images using text with the Flux API.",
"icon": "https://blackforestlabs.ai/wp-content/uploads/2024/07/bfl_logo_retraced_blk.png",
"isAuthRequired": "true",
"authConfig": [
{
"authField": "FLUX_API_KEY",
"label": "Your Flux API Key",
"description": "Provide your Flux API key from your user profile."
}
]
}
]

View File

@@ -1,17 +1,14 @@
const { z } = require('zod');
const path = require('path');
const OpenAI = require('openai');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { FileContext } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
const displayMessage =
'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
@@ -117,7 +114,10 @@ class DALLE3 extends Tool {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
return [displayMessage, value];
return [
'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.',
value,
];
}
return value;
@@ -160,32 +160,6 @@ Error Message: ${error.message}`);
);
}
if (this.isAgent) {
let fetchOptions = {};
if (process.env.PROXY) {
fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
}
const imageResponse = await fetch(theImageUrl, fetchOptions);
const arrayBuffer = await imageResponse.arrayBuffer();
const base64 = Buffer.from(arrayBuffer).toString('base64');
const content = [
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: `data:image/png;base64,${base64}`,
},
},
];
const response = [
{
type: ContentTypes.TEXT,
text: displayMessage,
},
];
return [response, { content }];
}
const imageBasename = getImageBasename(theImageUrl);
const imageExt = path.extname(imageBasename);

View File

@@ -1,554 +0,0 @@
const { z } = require('zod');
const axios = require('axios');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { logger } = require('~/config');
const displayMessage =
'Flux displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
/**
* FluxAPI - A tool for generating high-quality images from text prompts using the Flux API.
* Each call generates one image. If multiple images are needed, make multiple consecutive calls with the same or varied prompts.
*/
class FluxAPI extends Tool {
// Pricing constants in USD per image
static PRICING = {
FLUX_PRO_1_1_ULTRA: -0.06, // /v1/flux-pro-1.1-ultra
FLUX_PRO_1_1: -0.04, // /v1/flux-pro-1.1
FLUX_PRO: -0.05, // /v1/flux-pro
FLUX_DEV: -0.025, // /v1/flux-dev
FLUX_PRO_FINETUNED: -0.06, // /v1/flux-pro-finetuned
FLUX_PRO_1_1_ULTRA_FINETUNED: -0.07, // /v1/flux-pro-1.1-ultra-finetuned
};
constructor(fields = {}) {
super();
/** @type {boolean} Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
/** @type {boolean} **/
this.isAgent = fields.isAgent;
this.returnMetadata = fields.returnMetadata ?? false;
if (fields.processFileURL) {
/** @type {processFileURL} Necessary for output to contain all image metadata. */
this.processFileURL = fields.processFileURL.bind(this);
}
this.apiKey = fields.FLUX_API_KEY || this.getApiKey();
this.name = 'flux';
this.description =
'Use Flux to generate images from text descriptions. This tool can generate images and list available finetunes. Each generate call creates one image. For multiple images, make multiple consecutive calls.';
this.description_for_model = `// Transform any image description into a detailed, high-quality prompt. Never submit a prompt under 3 sentences. Follow these core rules:
// 1. ALWAYS enhance basic prompts into 5-10 detailed sentences (e.g., "a cat" becomes: "A close-up photo of a sleek Siamese cat with piercing blue eyes. The cat sits elegantly on a vintage leather armchair, its tail curled gracefully around its paws. Warm afternoon sunlight streams through a nearby window, casting gentle shadows across its face and highlighting the subtle variations in its cream and chocolate-point fur. The background is softly blurred, creating a shallow depth of field that draws attention to the cat's expressive features. The overall composition has a peaceful, contemplative mood with a professional photography style.")
// 2. Each prompt MUST be 3-6 descriptive sentences minimum, focusing on visual elements: lighting, composition, mood, and style
// Use action: 'list_finetunes' to see available custom models. When using finetunes, use endpoint: '/v1/flux-pro-finetuned' (default) or '/v1/flux-pro-1.1-ultra-finetuned' for higher quality and aspect ratio.`;
// Add base URL from environment variable with fallback
this.baseUrl = process.env.FLUX_API_BASE_URL || 'https://api.us1.bfl.ai';
// Define the schema for structured input
this.schema = z.object({
action: z
.enum(['generate', 'list_finetunes', 'generate_finetuned'])
.default('generate')
.describe(
'Action to perform: "generate" for image generation, "generate_finetuned" for finetuned model generation, "list_finetunes" to get available custom models',
),
prompt: z
.string()
.optional()
.describe(
'Text prompt for image generation. Required when action is "generate". Not used for list_finetunes.',
),
width: z
.number()
.optional()
.describe(
'Width of the generated image in pixels. Must be a multiple of 32. Default is 1024.',
),
height: z
.number()
.optional()
.describe(
'Height of the generated image in pixels. Must be a multiple of 32. Default is 768.',
),
prompt_upsampling: z
.boolean()
.optional()
.default(false)
.describe('Whether to perform upsampling on the prompt.'),
steps: z
.number()
.int()
.optional()
.describe('Number of steps to run the model for, a number from 1 to 50. Default is 40.'),
seed: z.number().optional().describe('Optional seed for reproducibility.'),
safety_tolerance: z
.number()
.optional()
.default(6)
.describe(
'Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.',
),
endpoint: z
.enum([
'/v1/flux-pro-1.1',
'/v1/flux-pro',
'/v1/flux-dev',
'/v1/flux-pro-1.1-ultra',
'/v1/flux-pro-finetuned',
'/v1/flux-pro-1.1-ultra-finetuned',
])
.optional()
.default('/v1/flux-pro-1.1')
.describe('Endpoint to use for image generation.'),
raw: z
.boolean()
.optional()
.default(false)
.describe(
'Generate less processed, more natural-looking images. Only works for /v1/flux-pro-1.1-ultra.',
),
finetune_id: z.string().optional().describe('ID of the finetuned model to use'),
finetune_strength: z
.number()
.optional()
.default(1.1)
.describe('Strength of the finetuning effect (typically between 0.1 and 1.2)'),
guidance: z.number().optional().default(2.5).describe('Guidance scale for finetuned models'),
aspect_ratio: z
.string()
.optional()
.default('16:9')
.describe('Aspect ratio for ultra models (e.g., "16:9")'),
});
}
getAxiosConfig() {
const config = {};
if (process.env.PROXY) {
config.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
}
return config;
}
/** @param {Object|string} value */
getDetails(value) {
if (typeof value === 'string') {
return value;
}
return JSON.stringify(value, null, 2);
}
getApiKey() {
const apiKey = process.env.FLUX_API_KEY || '';
if (!apiKey && !this.override) {
throw new Error('Missing FLUX_API_KEY environment variable.');
}
return apiKey;
}
wrapInMarkdown(imageUrl) {
const serverDomain = process.env.DOMAIN_SERVER || 'http://localhost:3080';
return `![generated image](${serverDomain}${imageUrl})`;
}
returnValue(value) {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
if (Array.isArray(value)) {
return value;
}
return [displayMessage, value];
}
return value;
}
async _call(data) {
const { action = 'generate', ...imageData } = data;
// Use provided API key for this request if available, otherwise use default
const requestApiKey = this.apiKey || this.getApiKey();
// Handle list_finetunes action
if (action === 'list_finetunes') {
return this.getMyFinetunes(requestApiKey);
}
// Handle finetuned generation
if (action === 'generate_finetuned') {
return this.generateFinetunedImage(imageData, requestApiKey);
}
// For generate action, ensure prompt is provided
if (!imageData.prompt) {
throw new Error('Missing required field: prompt');
}
let payload = {
prompt: imageData.prompt,
prompt_upsampling: imageData.prompt_upsampling || false,
safety_tolerance: imageData.safety_tolerance || 6,
output_format: imageData.output_format || 'png',
};
// Add optional parameters if provided
if (imageData.width) {
payload.width = imageData.width;
}
if (imageData.height) {
payload.height = imageData.height;
}
if (imageData.steps) {
payload.steps = imageData.steps;
}
if (imageData.seed !== undefined) {
payload.seed = imageData.seed;
}
if (imageData.raw) {
payload.raw = imageData.raw;
}
const generateUrl = `${this.baseUrl}${imageData.endpoint || '/v1/flux-pro'}`;
const resultUrl = `${this.baseUrl}/v1/get_result`;
logger.debug('[FluxAPI] Generating image with payload:', payload);
logger.debug('[FluxAPI] Using endpoint:', generateUrl);
let taskResponse;
try {
taskResponse = await axios.post(generateUrl, payload, {
headers: {
'x-key': requestApiKey,
'Content-Type': 'application/json',
Accept: 'application/json',
},
...this.getAxiosConfig(),
});
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while submitting task:', details);
return this.returnValue(
`Something went wrong when trying to generate the image. The Flux API may be unavailable:
Error Message: ${details}`,
);
}
const taskId = taskResponse.data.id;
// Polling for the result
let status = 'Pending';
let resultData = null;
while (status !== 'Ready' && status !== 'Error') {
try {
// Wait 2 seconds between polls
await new Promise((resolve) => setTimeout(resolve, 2000));
const resultResponse = await axios.get(resultUrl, {
headers: {
'x-key': requestApiKey,
Accept: 'application/json',
},
params: { id: taskId },
...this.getAxiosConfig(),
});
status = resultResponse.data.status;
if (status === 'Ready') {
resultData = resultResponse.data.result;
break;
} else if (status === 'Error') {
logger.error('[FluxAPI] Error in task:', resultResponse.data);
return this.returnValue('An error occurred during image generation.');
}
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while getting result:', details);
return this.returnValue('An error occurred while retrieving the image.');
}
}
// If no result data
if (!resultData || !resultData.sample) {
logger.error('[FluxAPI] No image data received from API. Response:', resultData);
return this.returnValue('No image data received from Flux API.');
}
// Try saving the image locally
const imageUrl = resultData.sample;
const imageName = `img-${uuidv4()}.png`;
if (this.isAgent) {
try {
// Fetch the image and convert to base64
const fetchOptions = {};
if (process.env.PROXY) {
fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
}
const imageResponse = await fetch(imageUrl, fetchOptions);
const arrayBuffer = await imageResponse.arrayBuffer();
const base64 = Buffer.from(arrayBuffer).toString('base64');
const content = [
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: `data:image/png;base64,${base64}`,
},
},
];
const response = [
{
type: ContentTypes.TEXT,
text: displayMessage,
},
];
return [response, { content }];
} catch (error) {
logger.error('Error processing image for agent:', error);
return this.returnValue(`Failed to process the image. ${error.message}`);
}
}
try {
logger.debug('[FluxAPI] Saving image:', imageUrl);
const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: imageUrl,
fileName: imageName,
basePath: 'images',
context: FileContext.image_generation,
});
logger.debug('[FluxAPI] Image saved to path:', result.filepath);
// Calculate cost based on endpoint
/**
* TODO: Cost handling
const endpoint = imageData.endpoint || '/v1/flux-pro';
const endpointKey = Object.entries(FluxAPI.PRICING).find(([key, _]) =>
endpoint.includes(key.toLowerCase().replace(/_/g, '-')),
)?.[0];
const cost = FluxAPI.PRICING[endpointKey] || 0;
*/
this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
return this.returnValue(this.result);
} catch (error) {
const details = this.getDetails(error?.message ?? 'No additional error details.');
logger.error('Error while saving the image:', details);
return this.returnValue(`Failed to save the image locally. ${details}`);
}
}
async getMyFinetunes(apiKey = null) {
const finetunesUrl = `${this.baseUrl}/v1/my_finetunes`;
const detailsUrl = `${this.baseUrl}/v1/finetune_details`;
try {
const headers = {
'x-key': apiKey || this.getApiKey(),
'Content-Type': 'application/json',
Accept: 'application/json',
};
// Get list of finetunes
const response = await axios.get(finetunesUrl, {
headers,
...this.getAxiosConfig(),
});
const finetunes = response.data.finetunes;
// Fetch details for each finetune
const finetuneDetails = await Promise.all(
finetunes.map(async (finetuneId) => {
try {
const detailResponse = await axios.get(`${detailsUrl}?finetune_id=${finetuneId}`, {
headers,
...this.getAxiosConfig(),
});
return {
id: finetuneId,
...detailResponse.data,
};
} catch (error) {
logger.error(`[FluxAPI] Error fetching details for finetune ${finetuneId}:`, error);
return {
id: finetuneId,
error: 'Failed to fetch details',
};
}
}),
);
if (this.isAgent) {
const formattedDetails = JSON.stringify(finetuneDetails, null, 2);
return [`Here are the available finetunes:\n${formattedDetails}`, null];
}
return JSON.stringify(finetuneDetails);
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while getting finetunes:', details);
const errorMsg = `Failed to get finetunes: ${details}`;
return this.isAgent ? this.returnValue([errorMsg, {}]) : new Error(errorMsg);
}
}
async generateFinetunedImage(imageData, requestApiKey) {
if (!imageData.prompt) {
throw new Error('Missing required field: prompt');
}
if (!imageData.finetune_id) {
throw new Error(
'Missing required field: finetune_id for finetuned generation. Please supply a finetune_id!',
);
}
// Validate endpoint is appropriate for finetuned generation
const validFinetunedEndpoints = ['/v1/flux-pro-finetuned', '/v1/flux-pro-1.1-ultra-finetuned'];
const endpoint = imageData.endpoint || '/v1/flux-pro-finetuned';
if (!validFinetunedEndpoints.includes(endpoint)) {
throw new Error(
`Invalid endpoint for finetuned generation. Must be one of: ${validFinetunedEndpoints.join(', ')}`,
);
}
let payload = {
prompt: imageData.prompt,
prompt_upsampling: imageData.prompt_upsampling || false,
safety_tolerance: imageData.safety_tolerance || 6,
output_format: imageData.output_format || 'png',
finetune_id: imageData.finetune_id,
finetune_strength: imageData.finetune_strength || 1.0,
guidance: imageData.guidance || 2.5,
};
// Add optional parameters if provided
if (imageData.width) {
payload.width = imageData.width;
}
if (imageData.height) {
payload.height = imageData.height;
}
if (imageData.steps) {
payload.steps = imageData.steps;
}
if (imageData.seed !== undefined) {
payload.seed = imageData.seed;
}
if (imageData.raw) {
payload.raw = imageData.raw;
}
const generateUrl = `${this.baseUrl}${endpoint}`;
const resultUrl = `${this.baseUrl}/v1/get_result`;
logger.debug('[FluxAPI] Generating finetuned image with payload:', payload);
logger.debug('[FluxAPI] Using endpoint:', generateUrl);
let taskResponse;
try {
taskResponse = await axios.post(generateUrl, payload, {
headers: {
'x-key': requestApiKey,
'Content-Type': 'application/json',
Accept: 'application/json',
},
...this.getAxiosConfig(),
});
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while submitting finetuned task:', details);
return this.returnValue(
`Something went wrong when trying to generate the finetuned image. The Flux API may be unavailable:
Error Message: ${details}`,
);
}
const taskId = taskResponse.data.id;
// Polling for the result
let status = 'Pending';
let resultData = null;
while (status !== 'Ready' && status !== 'Error') {
try {
// Wait 2 seconds between polls
await new Promise((resolve) => setTimeout(resolve, 2000));
const resultResponse = await axios.get(resultUrl, {
headers: {
'x-key': requestApiKey,
Accept: 'application/json',
},
params: { id: taskId },
...this.getAxiosConfig(),
});
status = resultResponse.data.status;
if (status === 'Ready') {
resultData = resultResponse.data.result;
break;
} else if (status === 'Error') {
logger.error('[FluxAPI] Error in finetuned task:', resultResponse.data);
return this.returnValue('An error occurred during finetuned image generation.');
}
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while getting finetuned result:', details);
return this.returnValue('An error occurred while retrieving the finetuned image.');
}
}
// If no result data
if (!resultData || !resultData.sample) {
logger.error('[FluxAPI] No image data received from API. Response:', resultData);
return this.returnValue('No image data received from Flux API.');
}
// Try saving the image locally
const imageUrl = resultData.sample;
const imageName = `img-${uuidv4()}.png`;
try {
logger.debug('[FluxAPI] Saving finetuned image:', imageUrl);
const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: imageUrl,
fileName: imageName,
basePath: 'images',
context: FileContext.image_generation,
});
logger.debug('[FluxAPI] Finetuned image saved to path:', result.filepath);
// Calculate cost based on endpoint
const endpointKey = endpoint.includes('ultra')
? 'FLUX_PRO_1_1_ULTRA_FINETUNED'
: 'FLUX_PRO_FINETUNED';
const cost = FluxAPI.PRICING[endpointKey] || 0;
// Return the result based on returnMetadata flag
this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
return this.returnValue(this.result);
} catch (error) {
const details = this.getDetails(error?.message ?? 'No additional error details.');
logger.error('Error while saving the finetuned image:', details);
return this.returnValue(`Failed to save the finetuned image locally. ${details}`);
}
}
}
module.exports = FluxAPI;
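
For orientation, a minimal sketch of driving this (removed) tool directly, outside the agent flow. The constructor fields mirror the ones read above; the user id, storage strategy, and processFileURL stub are hypothetical, and FLUX_API_KEY is assumed to be set in the environment.

// Hedged usage sketch (hypothetical values; assumes FLUX_API_KEY is set).
const FluxAPI = require('./FluxAPI');

async function demo() {
  const flux = new FluxAPI({
    isAgent: false,
    userId: 'user-123', // hypothetical
    fileStrategy: 'local', // passed through to processFileURL
    processFileURL: async ({ fileName }) => ({ filepath: `/images/${fileName}` }), // stub
  });
  // _call submits the task, then polls /v1/get_result every 2s until Ready or Error.
  const markdown = await flux._call({
    action: 'generate',
    prompt: 'A lighthouse on a rocky coast at dusk, warm light, photographic style.',
    width: 1024,
    height: 768,
  });
  console.log(markdown); // e.g. ![generated image](http://localhost:3080/images/img-<uuid>.png)
}

demo().catch(console.error);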

View File

@@ -6,13 +6,10 @@ const axios = require('axios');
const sharp = require('sharp');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { FileContext } = require('librechat-data-provider');
const paths = require('~/config/paths');
const { logger } = require('~/config');
const displayMessage =
'Stable Diffusion displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
class StableDiffusionAPI extends Tool {
constructor(fields) {
super();
@@ -24,8 +21,6 @@ class StableDiffusionAPI extends Tool {
this.override = fields.override ?? false;
/** @type {boolean} Necessary for output to contain all image metadata. */
this.returnMetadata = fields.returnMetadata ?? false;
/** @type {boolean} */
this.isAgent = fields.isAgent;
if (fields.uploadImageBuffer) {
/** @type {uploadImageBuffer} Necessary for output to contain all image metadata. */
this.uploadImageBuffer = fields.uploadImageBuffer.bind(this);
@@ -71,16 +66,6 @@ class StableDiffusionAPI extends Tool {
return `![generated image](/${imageUrl})`;
}
returnValue(value) {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
return [displayMessage, value];
}
return value;
}
getServerURL() {
const url = process.env.SD_WEBUI_URL || '';
if (!url && !this.override) {
@@ -128,25 +113,6 @@ class StableDiffusionAPI extends Tool {
}
try {
if (this.isAgent) {
const content = [
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: `data:image/png;base64,${image}`,
},
},
];
const response = [
{
type: ContentTypes.TEXT,
text: displayMessage,
},
];
return [response, { content }];
}
const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
if (this.returnMetadata && this.uploadImageBuffer && this.req) {
const file = await this.uploadImageBuffer({
@@ -188,7 +154,7 @@ class StableDiffusionAPI extends Tool {
logger.error('[StableDiffusion] Error while saving the image:', error);
}
return this.returnValue(this.result);
return this.result;
}
}

View File

@@ -10,7 +10,6 @@ const {
GoogleSearchAPI,
// Structured Tools
DALLE3,
FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,
@@ -21,7 +20,6 @@ const {
} = require('../');
const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process');
const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSearch');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { createMCPTool } = require('~/server/services/MCP');
const { loadSpecs } = require('./loadSpecs');
const { logger } = require('~/config');
@@ -91,6 +89,45 @@ const validateTools = async (user, tools = []) => {
}
};
const loadAuthValues = async ({ userId, authFields, throwError = true }) => {
let authValues = {};
/**
* Finds the first non-empty value for the given authentication field, supporting alternate fields.
* @param {string[]} fields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
* @returns {Promise<{ authField: string, authValue: string} | null>} An object containing the authentication field and value, or null if not found.
*/
const findAuthValue = async (fields) => {
for (const field of fields) {
let value = process.env[field];
if (value) {
return { authField: field, authValue: value };
}
try {
value = await getUserPluginAuthValue(userId, field, throwError);
} catch (err) {
if (field === fields[fields.length - 1] && !value) {
throw err;
}
}
if (value) {
return { authField: field, authValue: value };
}
}
return null;
};
for (let authField of authFields) {
const fields = authField.split('||');
const result = await findAuthValue(fields);
if (result) {
authValues[result.authField] = result.authValue;
}
}
return authValues;
};
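
As a sketch, resolving a key that may live under either of two fields: the '||' delimiter marks alternates, environment variables win over stored plugin auth, and the alternate field name here is hypothetical.

// Hedged sketch: first non-empty alternate wins; the result is keyed by the field that matched.
async function resolveFluxKey(req) {
  return loadAuthValues({
    userId: req.user.id,
    authFields: ['FLUX_API_KEY||BFL_API_KEY'], // hypothetical alternates
  });
  // -> { FLUX_API_KEY: '...' } if the env var is set,
  //    or { BFL_API_KEY: '...' } if only the user's stored value exists.
}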
/** @typedef {typeof import('@langchain/core/tools').Tool} ToolConstructor */
/** @typedef {import('@langchain/core/tools').Tool} Tool */
@@ -145,7 +182,6 @@ const loadTools = async ({
returnMap = false,
}) => {
const toolConstructors = {
flux: FluxAPI,
calculator: Calculator,
google: GoogleSearchAPI,
open_weather: OpenWeather,
@@ -194,10 +230,9 @@ const loadTools = async ({
};
const toolOptions = {
flux: imageGenOptions,
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
dalle: imageGenOptions,
'stable-diffusion': imageGenOptions,
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
};
const toolContextMap = {};
@@ -310,6 +345,7 @@ const loadTools = async ({
module.exports = {
loadToolWithAuth,
loadAuthValues,
validateTools,
loadTools,
};

View File

@@ -1,8 +1,9 @@
const { validateTools, loadTools } = require('./handleTools');
const { validateTools, loadTools, loadAuthValues } = require('./handleTools');
const handleOpenAIErrors = require('./handleOpenAIErrors');
module.exports = {
handleOpenAIErrors,
loadAuthValues,
validateTools,
loadTools,
};

View File

@@ -49,10 +49,6 @@ const genTitle = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
const s3ExpiryInterval = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.S3_EXPIRY_INTERVAL, ttl: Time.THIRTY_MINUTES });
const modelQueries = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.MODEL_QUERIES });
@@ -93,7 +89,6 @@ const namespaces = {
[CacheKeys.ABORT_KEYS]: abortKeys,
[CacheKeys.TOKEN_CONFIG]: tokenConfig,
[CacheKeys.GEN_TITLE]: genTitle,
[CacheKeys.S3_EXPIRY_INTERVAL]: s3ExpiryInterval,
[CacheKeys.MODEL_QUERIES]: modelQueries,
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,

api/cache/index.js
View File

@@ -1,5 +1,7 @@
const keyvFiles = require('./keyvFiles');
const getLogStores = require('./getLogStores');
const logViolation = require('./logViolation');
const mongoUserStore = require('./mongoUserStore');
const mongoChallengeStore = require('./mongoChallengeStore');
module.exports = { ...keyvFiles, getLogStores, logViolation };
module.exports = { ...keyvFiles, getLogStores, logViolation, mongoUserStore, mongoChallengeStore };

View File

@@ -9,7 +9,7 @@ const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_KEY_PREFIX, RED
let keyvRedis;
const redis_prefix = REDIS_KEY_PREFIX || '';
const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40;
const redis_max_listeners = REDIS_MAX_LISTENERS || 10;
function mapURI(uri) {
const regex =
@@ -77,10 +77,10 @@ if (REDIS_URI && isEnabled(USE_REDIS)) {
keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));
keyvRedis.setMaxListeners(redis_max_listeners);
logger.info(
'[Optional] Redis initialized. If you have issues, or seeing older values, disable it or flush cache to refresh values.',
'[Optional] Redis initialized. Note: Redis support is experimental. If you have issues, disable it. Cache needs to be flushed for values to refresh.',
);
} else {
logger.info('[Optional] Redis not initialized.');
logger.info('[Optional] Redis not initialized. Note: Redis support is experimental.');
}
module.exports = keyvRedis;

api/cache/mongoChallengeStore.js (new file)
View File

@@ -0,0 +1,35 @@
const ChallengeStore = require('~/models/ChallengeStore');
class MongoChallengeStore {
async get(userId) {
try {
const challenge = await ChallengeStore.findOne({ userId }).lean().exec();
return challenge ? challenge.challenge : undefined;
} catch (error) {
console.error(`❌ Error fetching challenge for userId ${userId}:`, error);
return undefined;
}
}
async save(userId, challenge) {
try {
await ChallengeStore.findOneAndUpdate(
{ userId },
{ challenge, createdAt: new Date() },
{ upsert: true, new: true, setDefaultsOnInsert: true },
).exec();
} catch (error) {
console.error(`❌ Error saving challenge for userId ${userId}:`, error);
}
}
async delete(userId) {
try {
await ChallengeStore.deleteOne({ userId }).exec();
} catch (error) {
console.error(`❌ Error deleting challenge for userId ${userId}:`, error);
}
}
}
module.exports = MongoChallengeStore;
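
A minimal sketch of the intended round-trip during a passkey ceremony; the challenge generation and verification steps are stand-ins, not part of this file.

// Hedged sketch of the challenge round-trip (helpers are hypothetical).
const crypto = require('crypto');
const MongoChallengeStore = require('~/cache/mongoChallengeStore');

const store = new MongoChallengeStore();

async function beginCeremony(userId) {
  const challenge = crypto.randomBytes(32).toString('base64url');
  await store.save(userId, challenge); // upserts { userId, challenge, createdAt }
  return challenge; // sent to the browser for signing
}

async function finishCeremony(userId /*, assertion */) {
  const expected = await store.get(userId); // undefined if missing or on DB error
  if (!expected) {
    throw new Error('No pending challenge for user');
  }
  // verify the signed assertion against `expected` here (library-specific)
  await store.delete(userId); // challenges are single-use
}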

api/cache/mongoUserStore.js (new file)
View File

@@ -0,0 +1,55 @@
const User = require('~/models');
class MongoUserStore {
async get(identifier, byID = false) {
let user;
if (byID) {
user = await User.getUserById(identifier);
} else {
user = await User.findUser({ email: identifier });
}
if (user) {
return {
id: user._id.toString(),
email: user.email,
passkeys: user.passkeys,
};
}
return undefined;
}
async save(user) {
if (!user.id) {
const createdUser = await User.createUser(
{
email: user.email,
username: user.email,
passkeys: user.passkeys,
},
/* disableTTL */ true,
/* returnUser */ true,
);
return {
id: createdUser._id.toString(),
email: createdUser.email,
passkeys: createdUser.passkeys,
};
} else {
const updatedUser = await User.updateUser(user.id, {
email: user.email,
username: user.email,
passkeys: user.passkeys,
});
if (!updatedUser) {
throw new Error('Failed to update user');
}
return {
id: updatedUser._id.toString(),
email: updatedUser.email,
passkeys: updatedUser.passkeys,
};
}
}
}
module.exports = MongoUserStore;
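
A short sketch of how this store is meant to be used by the passkey strategy: lookup is by email unless byID is true, and save decides between create and update based on whether id is present.

// Hedged usage sketch (shape only; the strategy wiring lives elsewhere).
const MongoUserStore = require('~/cache/mongoUserStore');
const users = new MongoUserStore();

async function upsertPasskeyUser(email, passkeys) {
  const existing = await users.get(email); // or users.get(id, true) to look up by _id
  return users.save({
    id: existing?.id, // undefined -> createUser, otherwise updateUser
    email,
    passkeys,
  }); // -> { id, email, passkeys }
}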

View File

@@ -1,4 +1,3 @@
const axios = require('axios');
const { EventSource } = require('eventsource');
const { Time, CacheKeys } = require('librechat-data-provider');
const logger = require('./winston');
@@ -48,46 +47,9 @@ const sendEvent = (res, event) => {
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
/**
* Creates and configures an Axios instance with optional proxy settings.
*
* @typedef {import('axios').AxiosInstance} AxiosInstance
* @typedef {import('axios').AxiosProxyConfig} AxiosProxyConfig
*
* @returns {AxiosInstance} A configured Axios instance
* @throws {Error} If there's an issue creating the Axios instance or parsing the proxy URL
*/
function createAxiosInstance() {
const instance = axios.create();
if (process.env.proxy) {
try {
const url = new URL(process.env.proxy);
/** @type {AxiosProxyConfig} */
const proxyConfig = {
host: url.hostname.replace(/^\[|\]$/g, ''),
protocol: url.protocol.replace(':', ''),
};
if (url.port) {
proxyConfig.port = parseInt(url.port, 10);
}
instance.defaults.proxy = proxyConfig;
} catch (error) {
console.error('Error parsing proxy URL:', error);
throw new Error(`Invalid proxy URL: ${process.env.proxy}`);
}
}
return instance;
}
module.exports = {
logger,
sendEvent,
getMCPManager,
createAxiosInstance,
getFlowStateManager,
};

View File

@@ -1,126 +0,0 @@
const axios = require('axios');
const { createAxiosInstance } = require('./index');
// Mock axios
jest.mock('axios', () => ({
interceptors: {
request: { use: jest.fn(), eject: jest.fn() },
response: { use: jest.fn(), eject: jest.fn() },
},
create: jest.fn().mockReturnValue({
defaults: {
proxy: null,
},
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
}),
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
reset: jest.fn().mockImplementation(function () {
this.get.mockClear();
this.post.mockClear();
this.put.mockClear();
this.delete.mockClear();
this.create.mockClear();
}),
}));
describe('createAxiosInstance', () => {
const originalEnv = process.env;
beforeEach(() => {
// Reset mocks
jest.clearAllMocks();
// Create a clean copy of process.env
process.env = { ...originalEnv };
// Default: no proxy
delete process.env.proxy;
});
afterAll(() => {
// Restore original process.env
process.env = originalEnv;
});
test('creates an axios instance without proxy when no proxy env is set', () => {
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toBeNull();
});
test('configures proxy correctly with hostname and protocol', () => {
process.env.proxy = 'http://example.com';
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toEqual({
host: 'example.com',
protocol: 'http',
});
});
test('configures proxy correctly with hostname, protocol and port', () => {
process.env.proxy = 'https://proxy.example.com:8080';
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toEqual({
host: 'proxy.example.com',
protocol: 'https',
port: 8080,
});
});
test('handles proxy URLs with authentication', () => {
process.env.proxy = 'http://user:pass@proxy.example.com:3128';
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toEqual({
host: 'proxy.example.com',
protocol: 'http',
port: 3128,
// Note: The current implementation doesn't handle auth - if needed, add this functionality
});
});
test('throws error when proxy URL is invalid', () => {
process.env.proxy = 'invalid-url';
expect(() => createAxiosInstance()).toThrow('Invalid proxy URL');
expect(axios.create).toHaveBeenCalledTimes(1);
});
// If you want to test the actual URL parsing more thoroughly
test('handles edge case proxy URLs correctly', () => {
// IPv6 address
process.env.proxy = 'http://[::1]:8080';
let instance = createAxiosInstance();
expect(instance.defaults.proxy).toEqual({
host: '::1',
protocol: 'http',
port: 8080,
});
// URL with path (which should be ignored for proxy config)
process.env.proxy = 'http://proxy.example.com:8080/some/path';
instance = createAxiosInstance();
expect(instance.defaults.proxy).toEqual({
host: 'proxy.example.com',
protocol: 'http',
port: 8080,
});
});
});

View File

@@ -4,11 +4,7 @@ require('winston-daily-rotate-file');
const logDir = path.join(__dirname, '..', 'logs');
const { NODE_ENV, DEBUG_LOGGING = false } = process.env;
const useDebugLogging =
(typeof DEBUG_LOGGING === 'string' && DEBUG_LOGGING?.toLowerCase() === 'true') ||
DEBUG_LOGGING === true;
const { NODE_ENV } = process.env;
const levels = {
error: 0,
@@ -40,10 +36,9 @@ const fileFormat = winston.format.combine(
winston.format.splat(),
);
const logLevel = useDebugLogging ? 'debug' : 'error';
const transports = [
new winston.transports.DailyRotateFile({
level: logLevel,
level: 'debug',
filename: `${logDir}/meiliSync-%DATE%.log`,
datePattern: 'YYYY-MM-DD',
zippedArchive: true,
@@ -53,6 +48,14 @@ const transports = [
}),
];
// if (NODE_ENV !== 'production') {
// transports.push(
// new winston.transports.Console({
// format: winston.format.combine(winston.format.colorize(), winston.format.simple()),
// }),
// );
// }
const consoleFormat = winston.format.combine(
winston.format.colorize({ all: true }),
winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),

View File

@@ -5,7 +5,7 @@ const { redactFormat, redactMessage, debugTraverse, jsonTruncateFormat } = requi
const logDir = path.join(__dirname, '..', 'logs');
const { NODE_ENV, DEBUG_LOGGING = true, CONSOLE_JSON = false, DEBUG_CONSOLE = false } = process.env;
const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false, CONSOLE_JSON = false } = process.env;
const useConsoleJson =
(typeof CONSOLE_JSON === 'string' && CONSOLE_JSON?.toLowerCase() === 'true') ||
@@ -15,10 +15,6 @@ const useDebugConsole =
(typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
DEBUG_CONSOLE === true;
const useDebugLogging =
(typeof DEBUG_LOGGING === 'string' && DEBUG_LOGGING?.toLowerCase() === 'true') ||
DEBUG_LOGGING === true;
const levels = {
error: 0,
warn: 1,
@@ -61,9 +57,28 @@ const transports = [
maxFiles: '14d',
format: fileFormat,
}),
// new winston.transports.DailyRotateFile({
// level: 'info',
// filename: `${logDir}/info-%DATE%.log`,
// datePattern: 'YYYY-MM-DD',
// zippedArchive: true,
// maxSize: '20m',
// maxFiles: '14d',
// }),
];
if (useDebugLogging) {
// if (NODE_ENV !== 'production') {
// transports.push(
// new winston.transports.Console({
// format: winston.format.combine(winston.format.colorize(), winston.format.simple()),
// }),
// );
// }
if (
(typeof DEBUG_LOGGING === 'string' && DEBUG_LOGGING?.toLowerCase() === 'true') ||
DEBUG_LOGGING === true
) {
transports.push(
new winston.transports.DailyRotateFile({
level: 'debug',
@@ -92,16 +107,10 @@ const consoleFormat = winston.format.combine(
}),
);
// Determine console log level
let consoleLogLevel = 'info';
if (useDebugConsole) {
consoleLogLevel = 'debug';
}
if (useDebugConsole) {
transports.push(
new winston.transports.Console({
level: consoleLogLevel,
level: 'debug',
format: useConsoleJson
? winston.format.combine(fileFormat, jsonTruncateFormat(), winston.format.json())
: winston.format.combine(fileFormat, debugTraverse),
@@ -110,14 +119,14 @@ if (useDebugConsole) {
} else if (useConsoleJson) {
transports.push(
new winston.transports.Console({
level: consoleLogLevel,
level: 'info',
format: winston.format.combine(fileFormat, jsonTruncateFormat(), winston.format.json()),
}),
);
} else {
transports.push(
new winston.transports.Console({
level: consoleLogLevel,
level: 'info',
format: consoleFormat,
}),
);
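
The same truthy-env pattern recurs above for DEBUG_LOGGING, DEBUG_CONSOLE, and CONSOLE_JSON; as a sketch, the check reduces to one helper.

// Hedged sketch of the repeated env check.
const isTruthyEnv = (value) =>
  (typeof value === 'string' && value.toLowerCase() === 'true') || value === true;

isTruthyEnv('TRUE'); // true
isTruthyEnv(undefined); // false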

View File

@@ -1,6 +1,6 @@
const { MeiliSearch } = require('meilisearch');
const { Conversation } = require('~/models/Conversation');
const { Message } = require('~/models/Message');
const Conversation = require('~/models/schema/convoSchema');
const Message = require('~/models/schema/messageSchema');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');

View File

@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
const { actionSchema } = require('@librechat/data-schemas');
const actionSchema = require('./schema/action');
const Action = mongoose.model('action', actionSchema);

View File

@@ -9,7 +9,7 @@ const {
removeAgentFromAllProjects,
} = require('./Project');
const getLogStores = require('~/cache/getLogStores');
const { agentSchema } = require('@librechat/data-schemas');
const agentSchema = require('./schema/agent');
const Agent = mongoose.model('agent', agentSchema);
@@ -46,10 +46,6 @@ const loadAgent = async ({ req, agent_id }) => {
id: agent_id,
});
if (!agent) {
return null;
}
if (agent.author.toString() === req.user.id) {
return agent;
}

View File

@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
const { assistantSchema } = require('@librechat/data-schemas');
const assistantSchema = require('./schema/assistant');
const Assistant = mongoose.model('assistant', assistantSchema);

View File

@@ -1,4 +1,44 @@
const mongoose = require('mongoose');
const { balanceSchema } = require('@librechat/data-schemas');
const balanceSchema = require('./schema/balance');
const { getMultiplier } = require('./tx');
const { logger } = require('~/config');
balanceSchema.statics.check = async function ({
user,
model,
endpoint,
valueKey,
tokenType,
amount,
endpointTokenConfig,
}) {
const multiplier = getMultiplier({ valueKey, tokenType, model, endpoint, endpointTokenConfig });
const tokenCost = amount * multiplier;
const { tokenCredits: balance } = (await this.findOne({ user }, 'tokenCredits').lean()) ?? {};
logger.debug('[Balance.check]', {
user,
model,
endpoint,
valueKey,
tokenType,
amount,
balance,
multiplier,
endpointTokenConfig: !!endpointTokenConfig,
});
if (!balance) {
return {
canSpend: false,
balance: 0,
tokenCost,
};
}
logger.debug('[Balance.check]', { tokenCost });
return { canSpend: balance >= tokenCost, balance, tokenCost };
};
module.exports = mongoose.model('Balance', balanceSchema);
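
As a sketch, a pre-spend check built on the static above; the model name and token count are illustrative.

// Hedged sketch: tokenCost = amount * multiplier, compared against the user's tokenCredits.
const Balance = require('~/models/Balance');

async function assertCanSpend(userId, promptTokens) {
  const { canSpend, balance, tokenCost } = await Balance.check({
    user: userId,
    model: 'gpt-4', // illustrative; getMultiplier resolves the rate
    tokenType: 'prompt',
    amount: promptTokens,
  });
  if (!canSpend) {
    throw new Error(`Insufficient balance: have ${balance}, need ${tokenCost}`);
  }
}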

View File

@@ -1,9 +1,5 @@
const mongoose = require('mongoose');
const Banner = require('./schema/banner');
const logger = require('~/config/winston');
const { bannerSchema } = require('@librechat/data-schemas');
const Banner = mongoose.model('Banner', bannerSchema);
/**
* Retrieves the current active banner.
* @returns {Promise<Object|null>} The active banner object or null if no active banner is found.
@@ -28,4 +24,4 @@ const getBanner = async (user) => {
}
};
module.exports = { Banner, getBanner };
module.exports = { getBanner };

View File

@@ -1,4 +1,5 @@
const { logger } = require('~/config');
// const { Categories } = require('./schema/categories');
const options = [
{

View File

@@ -0,0 +1,6 @@
const mongoose = require('mongoose');
const challengeSchema = require('~/models/schema/challengeSchema');
const ChallengeStore = mongoose.model('Challenge', challengeSchema);
module.exports = ChallengeStore;

View File

@@ -15,6 +15,19 @@ const searchConversation = async (conversationId) => {
throw new Error('Error searching conversation');
}
};
/**
* Searches for a conversation by conversationId and returns associated file ids.
* @param {string} conversationId - The conversation's ID.
* @returns {Promise<string[] | null>}
*/
const getConvoFiles = async (conversationId) => {
try {
return (await Conversation.findOne({ conversationId }, 'files').lean())?.files ?? [];
} catch (error) {
logger.error('[getConvoFiles] Error getting conversation files', error);
throw new Error('Error getting conversation files');
}
};
/**
* Retrieves a single conversation for a given user and conversation ID.
@@ -60,20 +73,6 @@ const deleteNullOrEmptyConversations = async () => {
}
};
/**
* Searches for a conversation by conversationId and returns associated file ids.
* @param {string} conversationId - The conversation's ID.
* @returns {Promise<string[] | null>}
*/
const getConvoFiles = async (conversationId) => {
try {
return (await Conversation.findOne({ conversationId }, 'files').lean())?.files ?? [];
} catch (error) {
logger.error('[getConvoFiles] Error getting conversation files', error);
throw new Error('Error getting conversation files');
}
};
module.exports = {
Conversation,
getConvoFiles,
@@ -105,16 +104,10 @@ module.exports = {
update.expiredAt = null;
}
/** @type {{ $set: Partial<TConversation>; $unset?: Record<keyof TConversation, number> }} */
const updateOperation = { $set: update };
if (metadata && metadata.unsetFields && Object.keys(metadata.unsetFields).length > 0) {
updateOperation.$unset = metadata.unsetFields;
}
/** Note: the resulting Model object is necessary for Meilisearch operations */
const conversation = await Conversation.findOneAndUpdate(
{ conversationId, user: req.user.id },
updateOperation,
update,
{
new: true,
upsert: true,

View File

@@ -1,11 +1,7 @@
const mongoose = require('mongoose');
const ConversationTag = require('./schema/conversationTagSchema');
const Conversation = require('./schema/convoSchema');
const logger = require('~/config/winston');
const { conversationTagSchema } = require('@librechat/data-schemas');
const ConversationTag = mongoose.model('ConversationTag', conversationTagSchema);
/**
* Retrieves all conversation tags for a user.
* @param {string} user - The user ID.

View File

@@ -1,6 +1,5 @@
const mongoose = require('mongoose');
const { fileSchema } = require('@librechat/data-schemas');
const { logger } = require('~/config');
const fileSchema = require('./schema/fileSchema');
const File = mongoose.model('File', fileSchema);
@@ -8,7 +7,7 @@ const File = mongoose.model('File', fileSchema);
* Finds a file by its file_id with additional query options.
* @param {string} file_id - The unique identifier of the file.
* @param {object} options - Query options for filtering, projection, etc.
* @returns {Promise<IMongoFile>} A promise that resolves to the file document or null.
* @returns {Promise<MongoFile>} A promise that resolves to the file document or null.
*/
const findFileById = async (file_id, options = {}) => {
return await File.findOne({ file_id, ...options }).lean();
@@ -18,46 +17,18 @@ const findFileById = async (file_id, options = {}) => {
* Retrieves files matching a given filter, sorted by the most recently updated.
* @param {Object} filter - The filter criteria to apply.
* @param {Object} [_sortOptions] - Optional sort parameters.
* @param {Object|String} [selectFields={ text: 0 }] - Fields to include/exclude in the query results.
* Default excludes the 'text' field.
* @returns {Promise<Array<IMongoFile>>} A promise that resolves to an array of file documents.
* @returns {Promise<Array<MongoFile>>} A promise that resolves to an array of file documents.
*/
const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => {
const getFiles = async (filter, _sortOptions) => {
const sortOptions = { updatedAt: -1, ..._sortOptions };
return await File.find(filter).select(selectFields).sort(sortOptions).lean();
};
/**
* Retrieves tool files (files that are embedded or have a fileIdentifier) from an array of file IDs
* @param {string[]} fileIds - Array of file_id strings to search for
* @returns {Promise<Array<IMongoFile>>} Files that match the criteria
*/
const getToolFilesByIds = async (fileIds) => {
if (!fileIds || !fileIds.length) {
return [];
}
try {
const filter = {
file_id: { $in: fileIds },
$or: [{ embedded: true }, { 'metadata.fileIdentifier': { $exists: true } }],
};
const selectFields = { text: 0 };
const sortOptions = { updatedAt: -1 };
return await getFiles(filter, sortOptions, selectFields);
} catch (error) {
logger.error('[getToolFilesByIds] Error retrieving tool files:', error);
throw new Error('Error retrieving tool files');
}
return await File.find(filter).sort(sortOptions).lean();
};
/**
* Creates a new file with a TTL of 1 hour.
* @param {IMongoFile} data - The file data to be created, must contain file_id.
* @param {MongoFile} data - The file data to be created, must contain file_id.
* @param {boolean} disableTTL - Whether to disable the TTL.
* @returns {Promise<IMongoFile>} A promise that resolves to the created file document.
* @returns {Promise<MongoFile>} A promise that resolves to the created file document.
*/
const createFile = async (data, disableTTL) => {
const fileData = {
@@ -77,8 +48,8 @@ const createFile = async (data, disableTTL) => {
/**
* Updates a file identified by file_id with new data and removes the TTL.
* @param {IMongoFile} data - The data to update, must contain file_id.
* @returns {Promise<IMongoFile>} A promise that resolves to the updated file document.
* @param {MongoFile} data - The data to update, must contain file_id.
* @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
*/
const updateFile = async (data) => {
const { file_id, ...update } = data;
@@ -91,8 +62,8 @@ const updateFile = async (data) => {
/**
* Increments the usage of a file identified by file_id.
* @param {IMongoFile} data - The data to update, must contain file_id and the increment value for usage.
* @returns {Promise<IMongoFile>} A promise that resolves to the updated file document.
* @param {MongoFile} data - The data to update, must contain file_id and the increment value for usage.
* @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
*/
const updateFileUsage = async (data) => {
const { file_id, inc = 1 } = data;
@@ -106,7 +77,7 @@ const updateFileUsage = async (data) => {
/**
* Deletes a file identified by file_id.
* @param {string} file_id - The unique identifier of the file to delete.
* @returns {Promise<IMongoFile>} A promise that resolves to the deleted file document or null.
* @returns {Promise<MongoFile>} A promise that resolves to the deleted file document or null.
*/
const deleteFile = async (file_id) => {
return await File.findOneAndDelete({ file_id }).lean();
@@ -115,7 +86,7 @@ const deleteFile = async (file_id) => {
/**
* Deletes a file identified by a filter.
* @param {object} filter - The filter criteria to apply.
* @returns {Promise<IMongoFile>} A promise that resolves to the deleted file document or null.
* @returns {Promise<MongoFile>} A promise that resolves to the deleted file document or null.
*/
const deleteFileByFilter = async (filter) => {
return await File.findOneAndDelete(filter).lean();
@@ -134,38 +105,14 @@ const deleteFiles = async (file_ids, user) => {
return await File.deleteMany(deleteQuery);
};
/**
* Batch updates files with new signed URLs in MongoDB
*
* @param {MongoFile[]} updates - Array of updates in the format { file_id, filepath }
* @returns {Promise<void>}
*/
async function batchUpdateFiles(updates) {
if (!updates || updates.length === 0) {
return;
}
const bulkOperations = updates.map((update) => ({
updateOne: {
filter: { file_id: update.file_id },
update: { $set: { filepath: update.filepath } },
},
}));
const result = await File.bulkWrite(bulkOperations);
logger.info(`Updated ${result.modifiedCount} files with new S3 URLs`);
}
module.exports = {
File,
findFileById,
getFiles,
getToolFilesByIds,
createFile,
updateFile,
updateFileUsage,
deleteFile,
deleteFiles,
deleteFileByFilter,
batchUpdateFiles,
};
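
For the side of this diff that still exports batchUpdateFiles, a sketch of refreshing expired signed URLs in bulk; the file ids and URLs are hypothetical.

// Hedged sketch: issues one bulkWrite of updateOne operations keyed on file_id.
const { batchUpdateFiles } = require('~/models/File');

async function refreshSignedUrls() {
  await batchUpdateFiles([
    { file_id: 'file-abc', filepath: 'https://bucket.s3.amazonaws.com/images/a.png?X-Amz-Signature=...' },
    { file_id: 'file-def', filepath: 'https://bucket.s3.amazonaws.com/images/b.png?X-Amz-Signature=...' },
  ]);
  // No-op on an empty array; logs the modifiedCount on success.
}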

View File

@@ -1,4 +1,4 @@
const mongoose = require('mongoose');
const { keySchema } = require('@librechat/data-schemas');
const keySchema = require('./schema/key');
module.exports = mongoose.model('Key', keySchema);

View File

@@ -71,42 +71,7 @@ async function saveMessage(req, params, metadata) {
} catch (err) {
logger.error('Error saving message:', err);
logger.info(`---\`saveMessage\` context: ${metadata?.context}`);
// Check if this is a duplicate key error (MongoDB error code 11000)
if (err.code === 11000 && err.message.includes('duplicate key error')) {
// Log the duplicate key error but don't crash the application
logger.warn(`Duplicate messageId detected: ${params.messageId}. Continuing execution.`);
try {
// Try to find the existing message with this ID
const existingMessage = await Message.findOne({
messageId: params.messageId,
user: req.user.id,
});
// If we found it, return it
if (existingMessage) {
return existingMessage.toObject();
}
// If we can't find it (unlikely but possible in race conditions)
return {
...params,
messageId: params.messageId,
user: req.user.id,
};
} catch (findError) {
// If the findOne also fails, log it but don't crash
logger.warn(`Could not retrieve existing message with ID ${params.messageId}: ${findError.message}`);
return {
...params,
messageId: params.messageId,
user: req.user.id,
};
}
}
throw err; // Re-throw other errors
throw err;
}
}

View File

@@ -1,6 +1,6 @@
const { model } = require('mongoose');
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
const { projectSchema } = require('@librechat/data-schemas');
const projectSchema = require('~/models/schema/projectSchema');
const Project = model('Project', projectSchema);
@@ -9,7 +9,7 @@ const Project = model('Project', projectSchema);
*
* @param {string} projectId - The ID of the project to find and return as a plain object.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<IMongoProject>} A plain object representing the project document, or `null` if no project is found.
* @returns {Promise<MongoProject>} A plain object representing the project document, or `null` if no project is found.
*/
const getProjectById = async function (projectId, fieldsToSelect = null) {
const query = Project.findById(projectId);
@@ -27,7 +27,7 @@ const getProjectById = async function (projectId, fieldsToSelect = null) {
*
* @param {string} projectName - The name of the project to find or create.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<IMongoProject>} A plain object representing the project document.
* @returns {Promise<MongoProject>} A plain object representing the project document.
*/
const getProjectByName = async function (projectName, fieldsToSelect = null) {
const query = { name: projectName };
@@ -47,7 +47,7 @@ const getProjectByName = async function (projectName, fieldsToSelect = null) {
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} promptGroupIds - The array of prompt group IDs to add to the project.
* @returns {Promise<IMongoProject>} The updated project document.
* @returns {Promise<MongoProject>} The updated project document.
*/
const addGroupIdsToProject = async function (projectId, promptGroupIds) {
return await Project.findByIdAndUpdate(
@@ -62,7 +62,7 @@ const addGroupIdsToProject = async function (projectId, promptGroupIds) {
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} promptGroupIds - The array of prompt group IDs to remove from the project.
* @returns {Promise<IMongoProject>} The updated project document.
* @returns {Promise<MongoProject>} The updated project document.
*/
const removeGroupIdsFromProject = async function (projectId, promptGroupIds) {
return await Project.findByIdAndUpdate(
@@ -87,7 +87,7 @@ const removeGroupFromAllProjects = async (promptGroupId) => {
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} agentIds - The array of agent IDs to add to the project.
* @returns {Promise<IMongoProject>} The updated project document.
* @returns {Promise<MongoProject>} The updated project document.
*/
const addAgentIdsToProject = async function (projectId, agentIds) {
return await Project.findByIdAndUpdate(
@@ -102,7 +102,7 @@ const addAgentIdsToProject = async function (projectId, agentIds) {
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} agentIds - The array of agent IDs to remove from the project.
* @returns {Promise<IMongoProject>} The updated project document.
* @returns {Promise<MongoProject>} The updated project document.
*/
const removeAgentIdsFromProject = async function (projectId, agentIds) {
return await Project.findByIdAndUpdate(

View File

@@ -1,4 +1,3 @@
const mongoose = require('mongoose');
const { ObjectId } = require('mongodb');
const { SystemRoles, SystemCategories, Constants } = require('librechat-data-provider');
const {
@@ -7,13 +6,10 @@ const {
removeGroupIdsFromProject,
removeGroupFromAllProjects,
} = require('./Project');
const { promptGroupSchema, promptSchema } = require('@librechat/data-schemas');
const { Prompt, PromptGroup } = require('./schema/promptSchema');
const { escapeRegExp } = require('~/server/utils');
const { logger } = require('~/config');
const PromptGroup = mongoose.model('PromptGroup', promptGroupSchema);
const Prompt = mongoose.model('Prompt', promptSchema);
/**
* Create a pipeline for the aggregation to get prompt groups
* @param {Object} query

View File

@@ -1,4 +1,3 @@
const mongoose = require('mongoose');
const {
CacheKeys,
SystemRoles,
@@ -7,17 +6,13 @@ const {
removeNullishValues,
agentPermissionsSchema,
promptPermissionsSchema,
runCodePermissionsSchema,
bookmarkPermissionsSchema,
multiConvoPermissionsSchema,
temporaryChatPermissionsSchema,
} = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const { roleSchema } = require('@librechat/data-schemas');
const Role = require('~/models/schema/roleSchema');
const { logger } = require('~/config');
const Role = mongoose.model('Role', roleSchema);
/**
* Retrieve a role by name and convert the found role document to a plain object.
* If the role with the given name doesn't exist and the name is a system defined role, create it and return the lean version.
@@ -82,8 +77,6 @@ const permissionSchemas = {
[PermissionTypes.PROMPTS]: promptPermissionsSchema,
[PermissionTypes.BOOKMARKS]: bookmarkPermissionsSchema,
[PermissionTypes.MULTI_CONVO]: multiConvoPermissionsSchema,
[PermissionTypes.TEMPORARY_CHAT]: temporaryChatPermissionsSchema,
[PermissionTypes.RUN_CODE]: runCodePermissionsSchema,
};
/**
@@ -171,7 +164,6 @@ const initializeRoles = async function () {
}
};
module.exports = {
Role,
getRoleByName,
initializeRoles,
updateRoleByName,

View File

@@ -8,7 +8,7 @@ const {
} = require('librechat-data-provider');
const { updateAccessPermissions, initializeRoles } = require('~/models/Role');
const getLogStores = require('~/cache/getLogStores');
const { Role } = require('~/models/Role');
const Role = require('~/models/schema/roleSchema');
// Mock the cache
jest.mock('~/cache/getLogStores', () => {

View File

@@ -1,7 +1,7 @@
const mongoose = require('mongoose');
const signPayload = require('~/server/services/signPayload');
const { hashToken } = require('~/server/utils/crypto');
const { sessionSchema } = require('@librechat/data-schemas');
const sessionSchema = require('./schema/session');
const { logger } = require('~/config');
const Session = mongoose.model('Session', sessionSchema);

View File

@@ -1,9 +1,7 @@
const mongoose = require('mongoose');
const { nanoid } = require('nanoid');
const { Constants } = require('librechat-data-provider');
const { Conversation } = require('~/models/Conversation');
const { shareSchema } = require('@librechat/data-schemas');
const SharedLink = mongoose.model('SharedLink', shareSchema);
const SharedLink = require('./schema/shareSchema');
const { getMessages } = require('./Message');
const logger = require('~/config/winston');

View File

@@ -1,6 +1,6 @@
const mongoose = require('mongoose');
const { encryptV2 } = require('~/server/utils/crypto');
const { tokenSchema } = require('@librechat/data-schemas');
const tokenSchema = require('./schema/tokenSchema');
const { logger } = require('~/config');
/**
@@ -13,13 +13,6 @@ const Token = mongoose.model('Token', tokenSchema);
*/
async function fixIndexes() {
try {
if (
process.env.NODE_ENV === 'CI' ||
process.env.NODE_ENV === 'development' ||
process.env.NODE_ENV === 'test'
) {
return;
}
const indexes = await Token.collection.indexes();
logger.debug('Existing Token Indexes:', JSON.stringify(indexes, null, 2));
const unwantedTTLIndexes = indexes.filter(
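fixIndexes is cut off mid-filter here. A hedged completion of its shape: identify TTL indexes (those carrying expireAfterSeconds) that don't match the intended definition and drop them. The filter predicate and the dropIndex call below are assumptions, not the hidden code:
// Assumed continuation: drop TTL indexes other than the expected one on expiresAt.
const unwanted = indexes.filter(
  (idx) => idx.expireAfterSeconds != null && !idx.key?.expiresAt,
);
for (const idx of unwanted) {
  logger.debug(`Dropping unwanted TTL index: ${idx.name}`);
  await Token.collection.dropIndex(idx.name);
}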

View File

@@ -1,11 +1,9 @@
const mongoose = require('mongoose');
const { toolCallSchema } = require('@librechat/data-schemas');
const ToolCall = mongoose.model('ToolCall', toolCallSchema);
const ToolCall = require('./schema/toolCallSchema');
/**
* Create a new tool call
* @param {IToolCallData} toolCallData - The tool call data
* @returns {Promise<IToolCallData>} The created tool call document
* @param {ToolCallData} toolCallData - The tool call data
* @returns {Promise<ToolCallData>} The created tool call document
*/
async function createToolCall(toolCallData) {
try {
@@ -18,7 +16,7 @@ async function createToolCall(toolCallData) {
/**
* Get a tool call by ID
* @param {string} id - The tool call document ID
* @returns {Promise<IToolCallData|null>} The tool call document or null if not found
* @returns {Promise<ToolCallData|null>} The tool call document or null if not found
*/
async function getToolCallById(id) {
try {
@@ -46,7 +44,7 @@ async function getToolCallsByMessage(messageId, userId) {
* Get tool calls by conversation ID and user
* @param {string} conversationId - The conversation ID
* @param {string} userId - The user's ObjectId
* @returns {Promise<IToolCallData[]>} Array of tool call documents
* @returns {Promise<ToolCallData[]>} Array of tool call documents
*/
async function getToolCallsByConvo(conversationId, userId) {
try {
@@ -59,8 +57,8 @@ async function getToolCallsByConvo(conversationId, userId) {
/**
* Update a tool call
* @param {string} id - The tool call document ID
* @param {Partial<IToolCallData>} updateData - The data to update
* @returns {Promise<IToolCallData|null>} The updated tool call document or null if not found
* @param {Partial<ToolCallData>} updateData - The data to update
* @returns {Promise<ToolCallData|null>} The updated tool call document or null if not found
*/
async function updateToolCall(id, updateData) {
try {
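A short usage sketch for the CRUD helpers documented above; field names beyond the documented IDs are illustrative only:
// Illustrative usage of the tool-call helpers (wrapped so await is valid).
async function exampleToolCallUsage(userId) {
  const saved = await createToolCall({
    messageId: 'msg-123',          // illustrative IDs
    conversationId: 'convo-456',
    user: userId,
  });
  const fetched = await getToolCallById(saved._id);
  const updated = await updateToolCall(saved._id, { result: '42' }); // illustrative field
  return { fetched, updated };
}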

View File

@@ -1,144 +1,11 @@
const mongoose = require('mongoose');
const { transactionSchema } = require('@librechat/data-schemas');
const { getBalanceConfig } = require('~/server/services/Config');
const { isEnabled } = require('~/server/utils/handleText');
const transactionSchema = require('./schema/transaction');
const { getMultiplier, getCacheMultiplier } = require('./tx');
const { logger } = require('~/config');
const Balance = require('./Balance');
const cancelRate = 1.15;
/**
* Updates a user's token balance based on a transaction using optimistic concurrency control
* without schema changes. Compatible with DocumentDB.
* @async
* @function
* @param {Object} params - The function parameters.
* @param {string|mongoose.Types.ObjectId} params.user - The user ID.
* @param {number} params.incrementValue - The value to increment the balance by (can be negative).
* @param {import('mongoose').UpdateQuery<import('@librechat/data-schemas').IBalance>['$set']} [params.setValues] - Optional additional fields to set.
* @returns {Promise<Object>} Returns the updated balance document (lean).
* @throws {Error} Throws an error if the update fails after multiple retries.
*/
const updateBalance = async ({ user, incrementValue, setValues }) => {
let maxRetries = 10; // Number of times to retry on conflict
let delay = 50; // Initial retry delay in ms
let lastError = null;
for (let attempt = 1; attempt <= maxRetries; attempt++) {
let currentBalanceDoc;
try {
// 1. Read the current document state
currentBalanceDoc = await Balance.findOne({ user }).lean();
const currentCredits = currentBalanceDoc ? currentBalanceDoc.tokenCredits : 0;
// 2. Calculate the desired new state
const potentialNewCredits = currentCredits + incrementValue;
const newCredits = Math.max(0, potentialNewCredits); // Ensure balance doesn't go below zero
// 3. Prepare the update payload
const updatePayload = {
$set: {
tokenCredits: newCredits,
...(setValues || {}), // Merge other values to set
},
};
// 4. Attempt the conditional update or upsert
let updatedBalance = null;
if (currentBalanceDoc) {
// --- Document Exists: Perform Conditional Update ---
// Try to update only if the tokenCredits match the value we read (currentCredits)
updatedBalance = await Balance.findOneAndUpdate(
{
user: user,
tokenCredits: currentCredits, // Optimistic lock: condition based on the read value
},
updatePayload,
{
new: true, // Return the modified document
// lean: true, // .lean() is applied after query execution in Mongoose >= 6
},
).lean(); // Use lean() for plain JS object
if (updatedBalance) {
// Success! The update was applied based on the expected current state.
return updatedBalance;
}
// If updatedBalance is null, it means tokenCredits changed between read and write (conflict).
lastError = new Error(`Concurrency conflict for user ${user} on attempt ${attempt}.`);
// Proceed to retry logic below.
} else {
// --- Document Does Not Exist: Perform Conditional Upsert ---
// Try to insert the document, but only if it still doesn't exist.
// Using tokenCredits: {$exists: false} helps prevent race conditions where
// another process creates the doc between our findOne and findOneAndUpdate.
try {
updatedBalance = await Balance.findOneAndUpdate(
{
user: user,
// Attempt to match only if the document doesn't exist OR was just created
// without tokenCredits (less likely but possible). A simple { user } filter
// might also work, relying on the retry for conflicts.
// Let's use a simpler filter and rely on retry for races.
// tokenCredits: { $exists: false } // This condition might be too strict if doc exists with 0 credits
},
updatePayload,
{
upsert: true, // Create if doesn't exist
new: true, // Return the created/updated document
// setDefaultsOnInsert: true, // Ensure schema defaults are applied on insert
// lean: true,
},
).lean();
if (updatedBalance) {
// Upsert succeeded (likely created the document)
return updatedBalance;
}
// If null, potentially a rare race condition during upsert. Retry should handle it.
lastError = new Error(
`Upsert race condition suspected for user ${user} on attempt ${attempt}.`,
);
} catch (error) {
if (error.code === 11000) {
// E11000 duplicate key error on index
// This means another process created the document *just* before our upsert.
// It's a concurrency conflict during creation. We should retry.
lastError = error; // Store the error
// Proceed to retry logic below.
} else {
// Different error, rethrow
throw error;
}
}
} // End if/else (document exists?)
} catch (error) {
// Catch errors from findOne or unexpected findOneAndUpdate errors
logger.error(`[updateBalance] Error during attempt ${attempt} for user ${user}:`, error);
lastError = error; // Store the error
// Consider stopping retries for non-transient errors, but for now, we retry.
}
// If we reached here, it means the update failed (conflict or error), wait and retry
if (attempt < maxRetries) {
const jitter = Math.random() * delay * 0.5; // Add jitter to delay
await new Promise((resolve) => setTimeout(resolve, delay + jitter));
delay = Math.min(delay * 2, 2000); // Exponential backoff with cap
}
} // End for loop (retries)
// If loop finishes without success, throw the last encountered error or a generic one
logger.error(
`[updateBalance] Failed to update balance for user ${user} after ${maxRetries} attempts.`,
);
throw (
lastError ||
new Error(
`Failed to update balance for user ${user} after maximum retries due to persistent conflicts.`,
)
);
};
/** Method to calculate and set the tokenValue for a transaction */
transactionSchema.methods.calculateTokenValue = function () {
if (!this.valueKey || !this.tokenType) {
@@ -154,39 +21,6 @@ transactionSchema.methods.calculateTokenValue = function () {
}
};
/**
* New static method to create an auto-refill transaction that does NOT trigger a balance update.
* @param {object} txData - Transaction data.
* @param {string} txData.user - The user ID.
* @param {string} txData.tokenType - The type of token.
* @param {string} txData.context - The context of the transaction.
* @param {number} txData.rawAmount - The raw amount of tokens.
* @returns {Promise<object>} - The created transaction.
*/
transactionSchema.statics.createAutoRefillTransaction = async function (txData) {
if (txData.rawAmount != null && isNaN(txData.rawAmount)) {
return;
}
const transaction = new this(txData);
transaction.endpointTokenConfig = txData.endpointTokenConfig;
transaction.calculateTokenValue();
await transaction.save();
const balanceResponse = await updateBalance({
user: transaction.user,
incrementValue: txData.rawAmount,
setValues: { lastRefill: new Date() },
});
const result = {
rate: transaction.rate,
user: transaction.user.toString(),
balance: balanceResponse.tokenCredits,
};
logger.debug('[Balance.check] Auto-refill performed', result);
result.transaction = transaction;
return result;
};
/**
* Static method to create a transaction and update the balance
* @param {txData} txData - Transaction data.
@@ -203,22 +37,27 @@ transactionSchema.statics.create = async function (txData) {
await transaction.save();
const balance = await getBalanceConfig();
if (!balance?.enabled) {
if (!isEnabled(process.env.CHECK_BALANCE)) {
return;
}
let balance = await Balance.findOne({ user: transaction.user }).lean();
let incrementValue = transaction.tokenValue;
const balanceResponse = await updateBalance({
user: transaction.user,
incrementValue,
});
if (balance && balance?.tokenCredits + incrementValue < 0) {
incrementValue = -balance.tokenCredits;
}
balance = await Balance.findOneAndUpdate(
{ user: transaction.user },
{ $inc: { tokenCredits: incrementValue } },
{ upsert: true, new: true },
).lean();
return {
rate: transaction.rate,
user: transaction.user.toString(),
balance: balanceResponse.tokenCredits,
balance: balance.tokenCredits,
[transaction.tokenType]: incrementValue,
};
};
@@ -239,22 +78,27 @@ transactionSchema.statics.createStructured = async function (txData) {
await transaction.save();
const balance = await getBalanceConfig();
if (!balance?.enabled) {
if (!isEnabled(process.env.CHECK_BALANCE)) {
return;
}
let balance = await Balance.findOne({ user: transaction.user }).lean();
let incrementValue = transaction.tokenValue;
const balanceResponse = await updateBalance({
user: transaction.user,
incrementValue,
});
if (balance && balance?.tokenCredits + incrementValue < 0) {
incrementValue = -balance.tokenCredits;
}
balance = await Balance.findOneAndUpdate(
{ user: transaction.user },
{ $inc: { tokenCredits: incrementValue } },
{ upsert: true, new: true },
).lean();
return {
rate: transaction.rate,
user: transaction.user.toString(),
balance: balanceResponse.tokenCredits,
balance: balance.tokenCredits,
[transaction.tokenType]: incrementValue,
};
};
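The retry loop in updateBalance reduces to a reusable optimistic-concurrency shape: attempt a conditional write keyed on the value just read, and back off with jitter when another writer wins the race. A distilled sketch:
// Generic form of the retry strategy used by updateBalance above.
async function withOptimisticRetry(attemptFn, { maxRetries = 10, baseDelay = 50 } = {}) {
  let delay = baseDelay;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    const result = await attemptFn(); // resolves null when the conditional write lost a race
    if (result != null) {
      return result;
    }
    const jitter = Math.random() * delay * 0.5; // jitter de-synchronizes competing writers
    await new Promise((resolve) => setTimeout(resolve, delay + jitter));
    delay = Math.min(delay * 2, 2000); // exponential backoff, capped like the real loop
  }
  throw new Error('Persistent write conflicts after maximum retries');
}
// Here attemptFn would be the findOneAndUpdate filtered on the tokenCredits value
// read at the start of the attempt, which is what makes the write conditional.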

View File

@@ -1,13 +1,9 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { getBalanceConfig } = require('~/server/services/Config');
const { getMultiplier, getCacheMultiplier } = require('./tx');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
// Mock the custom config module so we can control the balance flag.
jest.mock('~/server/services/Config');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { getMultiplier, getCacheMultiplier } = require('./tx');
let mongoServer;
@@ -24,8 +20,6 @@ afterAll(async () => {
beforeEach(async () => {
await mongoose.connection.dropDatabase();
// Default: enable balance updates in tests.
getBalanceConfig.mockResolvedValue({ enabled: true });
});
describe('Regular Token Spending Tests', () => {
@@ -50,22 +44,34 @@ describe('Regular Token Spending Tests', () => {
};
// Act
process.env.CHECK_BALANCE = 'true';
await spendTokens(txData, tokenUsage);
// Assert
console.log('Initial Balance:', initialBalance);
const updatedBalance = await Balance.findOne({ user: userId });
console.log('Updated Balance:', updatedBalance.tokenCredits);
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const completionMultiplier = getMultiplier({ model, tokenType: 'completion' });
const expectedTotalCost = 100 * promptMultiplier + 50 * completionMultiplier;
const expectedPromptCost = tokenUsage.promptTokens * promptMultiplier;
const expectedCompletionCost = tokenUsage.completionTokens * completionMultiplier;
const expectedTotalCost = expectedPromptCost + expectedCompletionCost;
const expectedBalance = initialBalance - expectedTotalCost;
expect(updatedBalance.tokenCredits).toBeLessThan(initialBalance);
expect(updatedBalance.tokenCredits).toBeCloseTo(expectedBalance, 0);
console.log('Expected Total Cost:', expectedTotalCost);
console.log('Actual Balance Decrease:', initialBalance - updatedBalance.tokenCredits);
});
test('spendTokens should handle zero completion tokens', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000;
const initialBalance = 10000000; // $10.00
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
@@ -83,19 +89,24 @@ describe('Regular Token Spending Tests', () => {
};
// Act
process.env.CHECK_BALANCE = 'true';
await spendTokens(txData, tokenUsage);
// Assert
const updatedBalance = await Balance.findOne({ user: userId });
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const expectedCost = 100 * promptMultiplier;
const expectedCost = tokenUsage.promptTokens * promptMultiplier;
expect(updatedBalance.tokenCredits).toBeCloseTo(initialBalance - expectedCost, 0);
console.log('Initial Balance:', initialBalance);
console.log('Updated Balance:', updatedBalance.tokenCredits);
console.log('Expected Cost:', expectedCost);
});
test('spendTokens should handle undefined token counts', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000;
const initialBalance = 10000000; // $10.00
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
@@ -109,17 +120,14 @@ describe('Regular Token Spending Tests', () => {
const tokenUsage = {};
// Act
const result = await spendTokens(txData, tokenUsage);
// Assert: No transaction should be created
expect(result).toBeUndefined();
});
test('spendTokens should handle only prompt tokens', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000;
const initialBalance = 10000000; // $10.00
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
@@ -133,44 +141,14 @@ describe('Regular Token Spending Tests', () => {
const tokenUsage = { promptTokens: 100 };
// Act
await spendTokens(txData, tokenUsage);
// Assert
const updatedBalance = await Balance.findOne({ user: userId });
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const expectedCost = 100 * promptMultiplier;
expect(updatedBalance.tokenCredits).toBeCloseTo(initialBalance - expectedCost, 0);
});
test('spendTokens should not update balance when balance feature is disabled', async () => {
// Arrange: Override the config to disable balance updates.
getBalanceConfig.mockResolvedValue({ balance: { enabled: false } });
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000;
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
const txData = {
user: userId,
conversationId: 'test-conversation-id',
model,
context: 'test',
endpointTokenConfig: null,
};
const tokenUsage = {
promptTokens: 100,
completionTokens: 50,
};
// Act
await spendTokens(txData, tokenUsage);
// Assert: Balance should remain unchanged.
const updatedBalance = await Balance.findOne({ user: userId });
expect(updatedBalance.tokenCredits).toBe(initialBalance);
});
});
describe('Structured Token Spending Tests', () => {
@@ -186,7 +164,7 @@ describe('Structured Token Spending Tests', () => {
conversationId: 'c23a18da-706c-470a-ac28-ec87ed065199',
model,
context: 'message',
endpointTokenConfig: null,
endpointTokenConfig: null, // We'll use the default rates
};
const tokenUsage = {
@@ -198,15 +176,28 @@ describe('Structured Token Spending Tests', () => {
completionTokens: 5,
};
// Get the actual multipliers
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const completionMultiplier = getMultiplier({ model, tokenType: 'completion' });
const writeMultiplier = getCacheMultiplier({ model, cacheType: 'write' });
const readMultiplier = getCacheMultiplier({ model, cacheType: 'read' });
console.log('Multipliers:', {
promptMultiplier,
completionMultiplier,
writeMultiplier,
readMultiplier,
});
// Act
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
// Calculate expected costs.
// Assert
console.log('Initial Balance:', initialBalance);
console.log('Updated Balance:', result.completion.balance);
console.log('Transaction Result:', result);
const expectedPromptCost =
tokenUsage.promptTokens.input * promptMultiplier +
tokenUsage.promptTokens.write * writeMultiplier +
@@ -215,21 +206,37 @@ describe('Structured Token Spending Tests', () => {
const expectedTotalCost = expectedPromptCost + expectedCompletionCost;
const expectedBalance = initialBalance - expectedTotalCost;
// Assert
console.log('Expected Cost:', expectedTotalCost);
console.log('Expected Balance:', expectedBalance);
expect(result.completion.balance).toBeLessThan(initialBalance);
// Allow for a small difference (e.g., 100 token credits, which is $0.0001)
const allowedDifference = 100;
expect(Math.abs(result.completion.balance - expectedBalance)).toBeLessThan(allowedDifference);
// Check if the decrease is approximately as expected
const balanceDecrease = initialBalance - result.completion.balance;
expect(balanceDecrease).toBeCloseTo(expectedTotalCost, 0);
const expectedPromptTokenValue = -expectedPromptCost;
const expectedCompletionTokenValue = -expectedCompletionCost;
// Check token values
const expectedPromptTokenValue = -(
tokenUsage.promptTokens.input * promptMultiplier +
tokenUsage.promptTokens.write * writeMultiplier +
tokenUsage.promptTokens.read * readMultiplier
);
const expectedCompletionTokenValue = -tokenUsage.completionTokens * completionMultiplier;
expect(result.prompt.prompt).toBeCloseTo(expectedPromptTokenValue, 1);
expect(result.completion.completion).toBe(expectedCompletionTokenValue);
console.log('Expected prompt tokenValue:', expectedPromptTokenValue);
console.log('Actual prompt tokenValue:', result.prompt.prompt);
console.log('Expected completion tokenValue:', expectedCompletionTokenValue);
console.log('Actual completion tokenValue:', result.completion.completion);
});
test('should handle zero completion tokens in structured spending', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
@@ -251,17 +258,15 @@ describe('Structured Token Spending Tests', () => {
completionTokens: 0,
};
// Act
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
// Assert
expect(result.prompt).toBeDefined();
expect(result.completion).toBeUndefined();
expect(result.prompt.prompt).toBeLessThan(0);
});
test('should handle only prompt tokens in structured spending', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
@@ -282,17 +287,15 @@ describe('Structured Token Spending Tests', () => {
},
};
// Act
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
// Assert
expect(result.prompt).toBeDefined();
expect(result.completion).toBeUndefined();
expect(result.prompt.prompt).toBeLessThan(0);
});
test('should handle undefined token counts in structured spending', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
@@ -307,10 +310,9 @@ describe('Structured Token Spending Tests', () => {
const tokenUsage = {};
// Act
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
// Assert
expect(result).toEqual({
prompt: undefined,
completion: undefined,
@@ -318,7 +320,6 @@ describe('Structured Token Spending Tests', () => {
});
test('should handle incomplete context for completion tokens', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
@@ -340,18 +341,15 @@ describe('Structured Token Spending Tests', () => {
completionTokens: 50,
};
// Act
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
// Assert:
// (Assuming a multiplier for completion of 15 and a cancel rate of 1.15 as noted in the original test.)
expect(result.completion.completion).toBeCloseTo(-50 * 15 * 1.15, 0);
expect(result.completion.completion).toBeCloseTo(-50 * 15 * 1.15, 0); // Assuming multiplier is 15 and cancelRate is 1.15
});
});
describe('NaN Handling Tests', () => {
test('should skip transaction creation when rawAmount is NaN', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000;
await Balance.create({ user: userId, tokenCredits: initialBalance });
@@ -367,11 +365,9 @@ describe('NaN Handling Tests', () => {
tokenType: 'prompt',
};
// Act
const result = await Transaction.create(txData);
// Assert: No transaction should be created and balance remains unchanged.
expect(result).toBeUndefined();
const balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(initialBalance);
});
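The balance assertions in these tests are plain arithmetic over the multipliers. A worked example with assumed rates (the real values come from getMultiplier and differ per model):
// Multiplier values below are assumptions for illustration only.
const promptMultiplier = 1.5;     // credits per prompt token (assumed)
const completionMultiplier = 2.0; // credits per completion token (assumed)
const totalCost = 100 * promptMultiplier + 50 * completionMultiplier; // 150 + 100 = 250
const expectedBalance = 10000000 - totalCost; // 9999750 token credits remain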

View File

@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
const { userSchema } = require('@librechat/data-schemas');
const userSchema = require('~/models/schema/userSchema');
const User = mongoose.model('User', userSchema);

View File

@@ -1,156 +0,0 @@
const { ViolationTypes } = require('librechat-data-provider');
const { Transaction } = require('./Transaction');
const { logViolation } = require('~/cache');
const { getMultiplier } = require('./tx');
const { logger } = require('~/config');
const Balance = require('./Balance');
function isInvalidDate(date) {
return isNaN(date);
}
/**
* Simple check method that calculates token cost and returns balance info.
* The auto-refill logic has been moved to balanceMethods.js to prevent circular dependencies.
*/
const checkBalanceRecord = async function ({
user,
model,
endpoint,
valueKey,
tokenType,
amount,
endpointTokenConfig,
}) {
const multiplier = getMultiplier({ valueKey, tokenType, model, endpoint, endpointTokenConfig });
const tokenCost = amount * multiplier;
// Retrieve the balance record
let record = await Balance.findOne({ user }).lean();
if (!record) {
logger.debug('[Balance.check] No balance record found for user', { user });
return {
canSpend: false,
balance: 0,
tokenCost,
};
}
let balance = record.tokenCredits;
logger.debug('[Balance.check] Initial state', {
user,
model,
endpoint,
valueKey,
tokenType,
amount,
balance,
multiplier,
endpointTokenConfig: !!endpointTokenConfig,
});
// Only perform auto-refill if spending would bring the balance to 0 or below
if (balance - tokenCost <= 0 && record.autoRefillEnabled && record.refillAmount > 0) {
const lastRefillDate = new Date(record.lastRefill);
const now = new Date();
if (
isInvalidDate(lastRefillDate) ||
now >=
addIntervalToDate(lastRefillDate, record.refillIntervalValue, record.refillIntervalUnit)
) {
try {
/** @type {{ rate: number, user: string, balance: number, transaction: import('@librechat/data-schemas').ITransaction}} */
const result = await Transaction.createAutoRefillTransaction({
user: user,
tokenType: 'credits',
context: 'autoRefill',
rawAmount: record.refillAmount,
});
balance = result.balance;
} catch (error) {
logger.error('[Balance.check] Failed to record transaction for auto-refill', error);
}
}
}
logger.debug('[Balance.check] Token cost', { tokenCost });
return { canSpend: balance >= tokenCost, balance, tokenCost };
};
/**
* Adds a time interval to a given date.
* @param {Date} date - The starting date.
* @param {number} value - The numeric value of the interval.
* @param {'seconds'|'minutes'|'hours'|'days'|'weeks'|'months'} unit - The unit of time.
* @returns {Date} A new Date representing the starting date plus the interval.
*/
const addIntervalToDate = (date, value, unit) => {
const result = new Date(date);
switch (unit) {
case 'seconds':
result.setSeconds(result.getSeconds() + value);
break;
case 'minutes':
result.setMinutes(result.getMinutes() + value);
break;
case 'hours':
result.setHours(result.getHours() + value);
break;
case 'days':
result.setDate(result.getDate() + value);
break;
case 'weeks':
result.setDate(result.getDate() + value * 7);
break;
case 'months':
result.setMonth(result.getMonth() + value);
break;
default:
break;
}
return result;
};
/**
* Checks the balance for a user and determines if they can spend a certain amount.
* If the user cannot spend the amount, it logs a violation and denies the request.
*
* @async
* @function
* @param {Object} params - The function parameters.
* @param {Express.Request} params.req - The Express request object.
* @param {Express.Response} params.res - The Express response object.
* @param {Object} params.txData - The transaction data.
* @param {string} params.txData.user - The user ID or identifier.
* @param {('prompt' | 'completion')} params.txData.tokenType - The type of token.
* @param {number} params.txData.amount - The amount of tokens.
* @param {string} params.txData.model - The model name or identifier.
* @param {string} [params.txData.endpointTokenConfig] - The token configuration for the endpoint.
* @returns {Promise<boolean>} Throws error if the user cannot spend the amount.
* @throws {Error} Throws an error if there's an issue with the balance check.
*/
const checkBalance = async ({ req, res, txData }) => {
const { canSpend, balance, tokenCost } = await checkBalanceRecord(txData);
if (canSpend) {
return true;
}
const type = ViolationTypes.TOKEN_BALANCE;
const errorMessage = {
type,
balance,
tokenCost,
promptTokens: txData.amount,
};
if (txData.generations && txData.generations.length > 0) {
errorMessage.generations = txData.generations;
}
await logViolation(req, res, type, errorMessage, 0);
throw new Error(JSON.stringify(errorMessage));
};
module.exports = {
checkBalance,
};
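Since addIntervalToDate is shown in full above, the refill gate reduces to a date comparison. A usage sketch mirroring the guard in checkBalanceRecord:
// Decide whether an auto-refill is due, as the guard above does.
const lastRefill = new Date('2025-02-01T00:00:00Z');
const nextRefillAt = addIntervalToDate(lastRefill, 1, 'weeks'); // 2025-02-08T00:00:00Z
const refillIsDue = new Date() >= nextRefillAt;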

View File

@@ -0,0 +1,45 @@
const { ViolationTypes } = require('librechat-data-provider');
const { logViolation } = require('~/cache');
const Balance = require('./Balance');
/**
* Checks the balance for a user and determines if they can spend a certain amount.
* If the user cannot spend the amount, it logs a violation and denies the request.
*
* @async
* @function
* @param {Object} params - The function parameters.
* @param {Express.Request} params.req - The Express request object.
* @param {Express.Response} params.res - The Express response object.
* @param {Object} params.txData - The transaction data.
* @param {string} params.txData.user - The user ID or identifier.
* @param {('prompt' | 'completion')} params.txData.tokenType - The type of token.
* @param {number} params.txData.amount - The amount of tokens.
* @param {string} params.txData.model - The model name or identifier.
* @param {string} [params.txData.endpointTokenConfig] - The token configuration for the endpoint.
* @returns {Promise<boolean>} Returns true if the user can spend the amount, otherwise denies the request.
* @throws {Error} Throws an error if there's an issue with the balance check.
*/
const checkBalance = async ({ req, res, txData }) => {
const { canSpend, balance, tokenCost } = await Balance.check(txData);
if (canSpend) {
return true;
}
const type = ViolationTypes.TOKEN_BALANCE;
const errorMessage = {
type,
balance,
tokenCost,
promptTokens: txData.amount,
};
if (txData.generations && txData.generations.length > 0) {
errorMessage.generations = txData.generations;
}
await logViolation(req, res, type, errorMessage, 0);
throw new Error(JSON.stringify(errorMessage));
};
module.exports = checkBalance;
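Callers are expected to catch the thrown JSON payload and surface it as a violation. A hedged sketch of such a call site (the route shape, parameter names, and the 402 status are assumptions, not the actual middleware):
// Illustrative call site for checkBalance.
async function handleCompletion(req, res, { promptTokens, model }) {
  try {
    await checkBalance({
      req,
      res,
      txData: { user: req.user.id, tokenType: 'prompt', amount: promptTokens, model },
    });
    // ...user can spend: proceed with the request
  } catch (err) {
    const violation = JSON.parse(err.message); // { type, balance, tokenCost, promptTokens }
    // respond with the violation payload (exact status handling is up to the caller)
    res.status(402).json(violation);
  }
}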

View File

@@ -1,32 +1,12 @@
const _ = require('lodash');
const mongoose = require('mongoose');
const { MeiliSearch } = require('meilisearch');
const { parseTextParts, ContentTypes } = require('librechat-data-provider');
const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc');
const logger = require('~/config/meiliLogger');
// Environment flags
/**
* Flag to indicate if search is enabled based on environment variables.
* @type {boolean}
*/
const searchEnabled = process.env.SEARCH && process.env.SEARCH.toLowerCase() === 'true';
/**
* Flag to indicate if MeiliSearch is enabled based on required environment variables.
* @type {boolean}
*/
const meiliEnabled = process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY && searchEnabled;
/**
* Validates the required options for configuring the mongoMeili plugin.
*
* @param {Object} options - The configuration options.
* @param {string} options.host - The MeiliSearch host.
* @param {string} options.apiKey - The MeiliSearch API key.
* @param {string} options.indexName - The name of the index.
* @throws {Error} Throws an error if any required option is missing.
*/
const validateOptions = function (options) {
const requiredKeys = ['host', 'apiKey', 'indexName'];
requiredKeys.forEach((key) => {
@@ -36,64 +16,53 @@ const validateOptions = function (options) {
});
};
/**
* Factory function to create a MeiliMongooseModel class which extends a Mongoose model.
* This class contains static and instance methods to synchronize and manage the MeiliSearch index
* corresponding to the MongoDB collection.
*
* @param {Object} config - Configuration object.
* @param {Object} config.index - The MeiliSearch index object.
* @param {Array<string>} config.attributesToIndex - List of attributes to index.
* @returns {Function} A class definition that will be loaded into the Mongoose schema.
*/
// const createMeiliMongooseModel = function ({ index, indexName, client, attributesToIndex }) {
const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
// The primary key is assumed to be the first attribute in the attributesToIndex array.
const primaryKey = attributesToIndex[0];
// MeiliMongooseModel is of type Mongoose.Model
class MeiliMongooseModel {
/**
* Synchronizes the data between the MongoDB collection and the MeiliSearch index.
* `syncWithMeili`: synchronizes the data between a MongoDB collection and a MeiliSearch index,
* only triggered if there's ever a discrepancy determined by `api\lib\db\indexSync.js`.
*
* The synchronization process involves:
* 1. Fetching all documents from the MongoDB collection and MeiliSearch index.
* 2. Comparing documents from both sources.
* 3. Deleting documents from MeiliSearch that no longer exist in MongoDB.
* 4. Adding documents to MeiliSearch that exist in MongoDB but not in the index.
* 5. Updating documents in MeiliSearch if key fields (such as `text` or `title`) differ.
* 6. Updating the `_meiliIndex` field in MongoDB to indicate the indexing status.
* 1. Fetches all documents from the MongoDB collection and the MeiliSearch index.
* 2. Compares the documents from both sources.
* 3. If a document exists in MeiliSearch but not in MongoDB, it's deleted from MeiliSearch.
* 4. If a document exists in MongoDB but not in MeiliSearch, it's added to MeiliSearch.
* 5. If a document exists in both but has different `text` or `title` fields (depending on the `primaryKey`), it's updated in MeiliSearch.
* 6. After all operations, it updates the `_meiliIndex` field in MongoDB to indicate whether the document is indexed in MeiliSearch.
*
* Note: The function processes documents in batches because MeiliSearch's
* `index.getDocuments` requires an exact limit and `index.addDocuments` does not handle
* partial failures in a batch.
* Note: This strategy does not use batch operations for Meilisearch as the `index.addDocuments` will discard
* the entire batch if there's an error with one document, and will not throw an error if there's an issue.
* Also, `index.getDocuments` needs an exact limit on the amount of documents to return, so we build the map in batches.
*
* @returns {Promise<void>} Resolves when the synchronization is complete.
* @returns {Promise} A promise that resolves when the synchronization is complete.
*
* @throws {Error} Throws an error if there's an issue with adding a document to MeiliSearch.
*/
static async syncWithMeili() {
try {
let moreDocuments = true;
// Retrieve all MongoDB documents from the collection as plain JavaScript objects.
const mongoDocuments = await this.find().lean();
const format = (doc) => _.pick(doc, attributesToIndex);
// Helper function to format a document by selecting only the attributes to index
// and omitting keys starting with '$'.
const format = (doc) =>
_.omitBy(_.pick(doc, attributesToIndex), (v, k) => k.startsWith('$'));
// Build a map of MongoDB documents for quick lookup based on the primary key.
// Prepare for comparison
const mongoMap = new Map(mongoDocuments.map((doc) => [doc[primaryKey], format(doc)]));
const indexMap = new Map();
let offset = 0;
const batchSize = 1000;
// Fetch documents from the MeiliSearch index in batches.
while (moreDocuments) {
const batch = await index.getDocuments({ limit: batchSize, offset });
if (batch.results.length === 0) {
moreDocuments = false;
}
for (const doc of batch.results) {
indexMap.set(doc[primaryKey], format(doc));
}
offset += batchSize;
}
@@ -101,12 +70,13 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
const updateOps = [];
// Process documents present in the MeiliSearch index.
// Iterate over Meili index documents
for (const [id, doc] of indexMap) {
const update = {};
update[primaryKey] = id;
if (mongoMap.has(id)) {
// If document exists in MongoDB, check for discrepancies in key fields.
// Case: Update
// If document also exists in MongoDB, would be update case
if (
(doc.text && doc.text !== mongoMap.get(id).text) ||
(doc.title && doc.title !== mongoMap.get(id).title)
@@ -122,7 +92,8 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
await index.addDocuments([doc]);
}
} else {
// If the document does not exist in MongoDB, delete it from MeiliSearch.
// Case: Delete
// If document does not exist in MongoDB, its a delete case from meili index
await index.deleteDocument(id);
updateOps.push({
updateOne: { filter: update, update: { $set: { _meiliIndex: false } } },
@@ -130,25 +101,24 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
}
}
// Process documents present in MongoDB.
// Iterate over MongoDB documents
for (const [id, doc] of mongoMap) {
const update = {};
update[primaryKey] = id;
// If the document is missing in the Meili index, add it.
// Case: Insert
// If document does not exist in Meili Index, Its an insert case
if (!indexMap.has(id)) {
await index.addDocuments([doc]);
updateOps.push({
updateOne: { filter: update, update: { $set: { _meiliIndex: true } } },
});
} else if (doc._meiliIndex === false) {
// If the document exists but is marked as not indexed, update the flag.
updateOps.push({
updateOne: { filter: update, update: { $set: { _meiliIndex: true } } },
});
}
}
// Execute bulk update operations in MongoDB to update the _meiliIndex flags.
if (updateOps.length > 0) {
await this.collection.bulkWrite(updateOps);
logger.debug(
@@ -162,47 +132,34 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
}
}
/**
* Updates settings for the MeiliSearch index.
*
* @param {Object} settings - The settings to update on the MeiliSearch index.
* @returns {Promise<Object>} Promise resolving to the update result.
*/
// Set one or more settings of the meili index
static async setMeiliIndexSettings(settings) {
return await index.updateSettings(settings);
}
/**
* Searches the MeiliSearch index and optionally populates the results with data from MongoDB.
*
* @param {string} q - The search query.
* @param {Object} params - Additional search parameters for MeiliSearch.
* @param {boolean} populate - Whether to populate search hits with full MongoDB documents.
* @returns {Promise<Object>} The search results with populated hits if requested.
*/
// Search the index
static async meiliSearch(q, params, populate) {
const data = await index.search(q, params);
// Populate hits with content from mongodb
if (populate) {
// Build a query using the primary key values from the search hits.
// Find objects into mongodb matching `objectID` from Meili search
const query = {};
// query[primaryKey] = { $in: _.map(data.hits, primaryKey) };
query[primaryKey] = _.map(data.hits, (hit) => cleanUpPrimaryKeyValue(hit[primaryKey]));
// logger.debug('query', query);
const hitsFromMongoose = await this.find(
query,
_.reduce(
this.schema.obj,
function (results, value, key) {
return { ...results, [key]: 1 };
},
{ _id: 1, __v: 1 },
),
).lean();
// Build a projection object, including only keys that do not start with '$'.
const projection = Object.keys(this.schema.obj).reduce(
(results, key) => {
if (!key.startsWith('$')) {
results[key] = 1;
}
return results;
},
{ _id: 1, __v: 1 },
);
// Retrieve the full documents from MongoDB.
const hitsFromMongoose = await this.find(query, projection).lean();
// Merge the MongoDB documents with the search hits.
// Add additional data from mongodb into Meili search hits
const populatedHits = data.hits.map(function (hit) {
const query = {};
query[primaryKey] = hit[primaryKey];
@@ -219,80 +176,51 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
return data;
}
/**
* Preprocesses the current document for indexing.
*
* This method:
* - Picks only the defined attributes to index.
* - Omits any keys starting with '$'.
* - Replaces pipe characters ('|') in `conversationId` with '--'.
* - Extracts and concatenates text from an array of content items.
*
* @returns {Object} The preprocessed object ready for indexing.
*/
preprocessObjectForIndex() {
const object = _.omitBy(_.pick(this.toJSON(), attributesToIndex), (v, k) =>
k.startsWith('$'),
);
const object = _.pick(this.toJSON(), attributesToIndex);
// NOTE: MeiliSearch does not allow | in primary key, so we replace it with - for Bing convoIds
// object.conversationId = object.conversationId.replace(/\|/g, '-');
if (object.conversationId && object.conversationId.includes('|')) {
object.conversationId = object.conversationId.replace(/\|/g, '--');
}
if (object.content && Array.isArray(object.content)) {
object.text = parseTextParts(object.content);
object.text = object.content
.filter((item) => item.type === 'text' && item.text && item.text.value)
.map((item) => item.text.value)
.join(' ');
delete object.content;
}
return object;
}
/**
* Adds the current document to the MeiliSearch index.
*
* The method preprocesses the document, adds it to MeiliSearch, and then updates
* the MongoDB document's `_meiliIndex` flag to true.
*
* @returns {Promise<void>}
*/
// Push new document to Meili
async addObjectToMeili() {
const object = this.preprocessObjectForIndex();
try {
// logger.debug('Adding document to Meili', object);
await index.addDocuments([object]);
} catch (error) {
// Error handling can be enhanced as needed.
logger.error('[addObjectToMeili] Error adding document to Meili', error);
// logger.debug('Error adding document to Meili');
// logger.error(error);
}
await this.collection.updateMany({ _id: this._id }, { $set: { _meiliIndex: true } });
}
/**
* Updates the current document in the MeiliSearch index.
*
* @returns {Promise<void>}
*/
// Update an existing document in Meili
async updateObjectToMeili() {
const object = _.omitBy(_.pick(this.toJSON(), attributesToIndex), (v, k) =>
k.startsWith('$'),
);
const object = _.pick(this.toJSON(), attributesToIndex);
await index.updateDocuments([object]);
}
/**
* Deletes the current document from the MeiliSearch index.
*
* @returns {Promise<void>}
*/
// Delete a document from Meili
async deleteObjectFromMeili() {
await index.deleteDocument(this._id);
}
/**
* Post-save hook to synchronize the document with MeiliSearch.
*
* If the document is already indexed (i.e. `_meiliIndex` is true), it updates it;
* otherwise, it adds the document to the index.
*/
// * schema.post('save')
postSaveHook() {
if (this._meiliIndex) {
this.updateObjectToMeili();
@@ -301,24 +229,14 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
}
}
/**
* Post-update hook to update the document in MeiliSearch.
*
* This hook is triggered after a document update, ensuring that changes are
* propagated to the MeiliSearch index if the document is indexed.
*/
// * schema.post('update')
postUpdateHook() {
if (this._meiliIndex) {
this.updateObjectToMeili();
}
}
/**
* Post-remove hook to delete the document from MeiliSearch.
*
* This hook is triggered after a document is removed, ensuring that the document
* is also removed from the MeiliSearch index if it was previously indexed.
*/
// * schema.post('remove')
postRemoveHook() {
if (this._meiliIndex) {
this.deleteObjectFromMeili();
@@ -329,27 +247,11 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
return MeiliMongooseModel;
};
/**
* Mongoose plugin to synchronize MongoDB collections with a MeiliSearch index.
*
* This plugin:
* - Validates the provided options.
* - Adds a `_meiliIndex` field to the schema to track indexing status.
* - Sets up a MeiliSearch client and creates an index if it doesn't already exist.
* - Loads class methods for syncing, searching, and managing documents in MeiliSearch.
* - Registers Mongoose hooks (post-save, post-update, post-remove, etc.) to maintain index consistency.
*
* @param {mongoose.Schema} schema - The Mongoose schema to which the plugin is applied.
* @param {Object} options - Configuration options.
* @param {string} options.host - The MeiliSearch host.
* @param {string} options.apiKey - The MeiliSearch API key.
* @param {string} options.indexName - The name of the MeiliSearch index.
* @param {string} options.primaryKey - The primary key field for indexing.
*/
module.exports = function mongoMeili(schema, options) {
// Validate Options for mongoMeili
validateOptions(options);
// Add _meiliIndex field to the schema to track if a document has been indexed in MeiliSearch.
// Add meiliIndex to schema
schema.add({
_meiliIndex: {
type: Boolean,
@@ -361,77 +263,69 @@ module.exports = function mongoMeili(schema, options) {
const { host, apiKey, indexName, primaryKey } = options;
// Setup the MeiliSearch client.
// Setup MeiliSearch Client
const client = new MeiliSearch({ host, apiKey });
// Create the index asynchronously if it doesn't exist.
// Asynchronously create the index
client.createIndex(indexName, { primaryKey });
// Setup the MeiliSearch index for this schema.
// Setup the index to search for this schema
const index = client.index(indexName);
// Collect attributes from the schema that should be indexed.
const attributesToIndex = [
..._.reduce(
schema.obj,
function (results, value, key) {
return value.meiliIndex ? [...results, key] : results;
// }, []), '_id'];
},
[],
),
];
// Load the class methods into the schema.
schema.loadClass(createMeiliMongooseModel({ index, indexName, client, attributesToIndex }));
// Register Mongoose hooks to synchronize with MeiliSearch.
// Post-save: synchronize after a document is saved.
// Register hooks
schema.post('save', function (doc) {
doc.postSaveHook();
});
// Post-update: synchronize after a document is updated.
schema.post('update', function (doc) {
doc.postUpdateHook();
});
// Post-remove: synchronize after a document is removed.
schema.post('remove', function (doc) {
doc.postRemoveHook();
});
// Pre-deleteMany hook: remove corresponding documents from MeiliSearch when multiple documents are deleted.
schema.pre('deleteMany', async function (next) {
if (!meiliEnabled) {
return next();
next();
}
try {
// Check if the schema has a "messages" field to determine if it's a conversation schema.
if (Object.prototype.hasOwnProperty.call(schema.obj, 'messages')) {
const convoIndex = client.index('convos');
const deletedConvos = await mongoose.model('Conversation').find(this._conditions).lean();
const promises = deletedConvos.map((convo) =>
convoIndex.deleteDocument(convo.conversationId),
);
let promises = [];
for (const convo of deletedConvos) {
promises.push(convoIndex.deleteDocument(convo.conversationId));
}
await Promise.all(promises);
}
// Check if the schema has a "messageId" field to determine if it's a message schema.
if (Object.prototype.hasOwnProperty.call(schema.obj, 'messageId')) {
const messageIndex = client.index('messages');
const deletedMessages = await mongoose.model('Message').find(this._conditions).lean();
const promises = deletedMessages.map((message) =>
messageIndex.deleteDocument(message.messageId),
);
let promises = [];
for (const message of deletedMessages) {
promises.push(messageIndex.deleteDocument(message.messageId));
}
await Promise.all(promises);
}
return next();
} catch (error) {
if (meiliEnabled) {
logger.error(
'[MeiliMongooseModel.deleteMany] There was an issue deleting conversation indexes upon deletion. Next startup may be slow due to syncing.',
'[MeiliMongooseModel.deleteMany] There was an issue deleting conversation indexes upon deletion, next startup may be slow due to syncing',
error,
);
}
@@ -439,19 +333,17 @@ module.exports = function mongoMeili(schema, options) {
}
});
// Post-findOneAndUpdate hook: update MeiliSearch index after a document is updated via findOneAndUpdate.
schema.post('findOneAndUpdate', async function (doc) {
if (!meiliEnabled) {
return;
}
// If the document is unfinished, do not update the index.
if (doc.unfinished) {
return;
}
let meiliDoc;
// For conversation documents, try to fetch the document from the "convos" index.
// Doc is a Conversation
if (doc.messages) {
try {
meiliDoc = await client.index('convos').getDocument(doc.conversationId);
@@ -464,12 +356,10 @@ module.exports = function mongoMeili(schema, options) {
}
}
// If the MeiliSearch document exists and the title is unchanged, do nothing.
if (meiliDoc && meiliDoc.title === doc.title) {
return;
}
// Otherwise, trigger a post-save hook to synchronize the document.
doc.postSaveHook();
});
};
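A usage sketch for the plugin, modeled on how the conversation schema applies it below; the Note schema here is hypothetical:
const mongoose = require('mongoose');
const mongoMeili = require('~/models/plugins/mongoMeili');
// Fields flagged meiliIndex: true become the attributes synced to MeiliSearch;
// the first such field doubles as the primary key source.
const noteSchema = mongoose.Schema({
  noteId: { type: String, unique: true, required: true, meiliIndex: true },
  title: { type: String, meiliIndex: true },
  body: { type: String, meiliIndex: true },
});
if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
  noteSchema.plugin(mongoMeili, {
    host: process.env.MEILI_HOST,
    apiKey: process.env.MEILI_MASTER_KEY,
    indexName: 'notes', // created automatically if it doesn't exist
    primaryKey: 'noteId',
  });
}
const Note = mongoose.model('Note', noteSchema);
// Search Meili and populate hits with the full MongoDB documents:
// const results = await Note.meiliSearch('query text', { limit: 20 }, true);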

View File

@@ -0,0 +1,60 @@
const mongoose = require('mongoose');
const { Schema } = mongoose;
const AuthSchema = new Schema(
{
authorization_type: String,
custom_auth_header: String,
type: {
type: String,
enum: ['service_http', 'oauth', 'none'],
},
authorization_content_type: String,
authorization_url: String,
client_url: String,
scope: String,
token_exchange_method: {
type: String,
enum: ['default_post', 'basic_auth_header', null],
},
},
{ _id: false },
);
const actionSchema = new Schema({
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
index: true,
required: true,
},
action_id: {
type: String,
index: true,
required: true,
},
type: {
type: String,
default: 'action_prototype',
},
settings: Schema.Types.Mixed,
agent_id: String,
assistant_id: String,
metadata: {
api_key: String, // private, encrypted
auth: AuthSchema,
domain: {
type: String,
required: true,
},
// json_schema: Schema.Types.Mixed,
privacy_policy_url: String,
raw_spec: String,
oauth_client_id: String, // private, encrypted
oauth_client_secret: String, // private, encrypted
},
});
// }, { minimize: false }); // Prevent removal of empty objects
module.exports = actionSchema;
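A minimal registration sketch; the model name 'Action' and the require path are assumed from convention:
const mongoose = require('mongoose');
const actionSchema = require('./schema/action'); // path assumed
const Action = mongoose.model('Action', actionSchema);
// user, action_id, and metadata.domain are the only required fields:
// await Action.create({ user: userId, action_id: 'act_abc', metadata: { domain: 'api.example.com' } });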

View File

@@ -1,34 +1,6 @@
import { Schema, Document, Types } from 'mongoose';
export interface IAgent extends Omit<Document, 'model'> {
id: string;
name?: string;
description?: string;
instructions?: string;
avatar?: {
filepath: string;
source: string;
};
provider: string;
model: string;
model_parameters?: Record<string, unknown>;
artifacts?: string;
access_level?: number;
recursion_limit?: number;
tools?: string[];
tool_kwargs?: Array<unknown>;
actions?: string[];
author: Types.ObjectId;
authorName?: string;
hide_sequential_outputs?: boolean;
end_after_tools?: boolean;
agent_ids?: string[];
isCollaborative?: boolean;
conversation_starters?: string[];
tool_resources?: unknown;
projectIds?: Types.ObjectId[];
}
const mongoose = require('mongoose');
const agentSchema = new Schema<IAgent>(
const agentSchema = mongoose.Schema(
{
id: {
type: String,
@@ -46,7 +18,10 @@ const agentSchema = new Schema<IAgent>(
type: String,
},
avatar: {
type: Schema.Types.Mixed,
type: {
filepath: String,
source: String,
},
default: undefined,
},
provider: {
@@ -66,22 +41,19 @@ const agentSchema = new Schema<IAgent>(
access_level: {
type: Number,
},
recursion_limit: {
type: Number,
},
tools: {
type: [String],
default: undefined,
},
tool_kwargs: {
type: [{ type: Schema.Types.Mixed }],
type: [{ type: mongoose.Schema.Types.Mixed }],
},
actions: {
type: [String],
default: undefined,
},
author: {
type: Schema.Types.ObjectId,
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
@@ -107,11 +79,11 @@ const agentSchema = new Schema<IAgent>(
default: [],
},
tool_resources: {
type: Schema.Types.Mixed,
type: mongoose.Schema.Types.Mixed,
default: {},
},
projectIds: {
type: [Schema.Types.ObjectId],
type: [mongoose.Schema.Types.ObjectId],
ref: 'Project',
index: true,
},
@@ -121,4 +93,4 @@ const agentSchema = new Schema<IAgent>(
},
);
export default agentSchema;
module.exports = agentSchema;

View File

@@ -1,23 +1,9 @@
import { Schema, Document, Types } from 'mongoose';
const mongoose = require('mongoose');
export interface IAssistant extends Document {
user: Types.ObjectId;
assistant_id: string;
avatar?: {
filepath: string;
source: string;
};
conversation_starters?: string[];
access_level?: number;
file_ids?: string[];
actions?: string[];
append_current_datetime?: boolean;
}
const assistantSchema = new Schema<IAssistant>(
const assistantSchema = mongoose.Schema(
{
user: {
type: Schema.Types.ObjectId,
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
@@ -27,7 +13,10 @@ const assistantSchema = new Schema<IAssistant>(
required: true,
},
avatar: {
type: Schema.Types.Mixed,
type: {
filepath: String,
source: String,
},
default: undefined,
},
conversation_starters: {
@@ -49,4 +38,4 @@ const assistantSchema = new Schema<IAssistant>(
},
);
export default assistantSchema;
module.exports = assistantSchema;

View File

@@ -0,0 +1,17 @@
const mongoose = require('mongoose');
const balanceSchema = mongoose.Schema({
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
index: true,
required: true,
},
// 1000 tokenCredits = 1 mill ($0.001 USD)
tokenCredits: {
type: Number,
default: 0,
},
});
module.exports = balanceSchema;

View File

@@ -1,15 +1,6 @@
import { Schema, Document } from 'mongoose';
const mongoose = require('mongoose');
export interface IBanner extends Document {
bannerId: string;
message: string;
displayFrom: Date;
displayTo?: Date;
type: 'banner' | 'popup';
isPublic: boolean;
}
const bannerSchema = new Schema<IBanner>(
const bannerSchema = mongoose.Schema(
{
bannerId: {
type: String,
@@ -37,7 +28,9 @@ const bannerSchema = new Schema<IBanner>(
default: false,
},
},
{ timestamps: true },
);
export default bannerSchema;
const Banner = mongoose.model('Banner', bannerSchema);
module.exports = Banner;

View File

@@ -0,0 +1,19 @@
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
const categoriesSchema = new Schema({
label: {
type: String,
required: true,
unique: true,
},
value: {
type: String,
required: true,
unique: true,
},
});
const categories = mongoose.model('categories', categoriesSchema);
module.exports = { Categories: categories };

View File

@@ -0,0 +1,22 @@
const mongoose = require('mongoose');
const challengeSchema = mongoose.Schema({
userId: {
type: String,
required: true,
unique: true,
},
challenge: {
type: String,
required: true,
},
createdAt: {
type: Date,
default: Date.now,
index: {
expires: '5m',
},
},
});
module.exports = challengeSchema;
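The expires: '5m' shorthand makes MongoDB build a TTL index on createdAt, so unused challenges are purged automatically after five minutes. The explicit equivalent:
// Equivalent to the field-level `expires: '5m'` shorthand above.
challengeSchema.index({ createdAt: 1 }, { expireAfterSeconds: 300 });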

View File

@@ -0,0 +1,32 @@
const mongoose = require('mongoose');
const conversationTagSchema = mongoose.Schema(
{
tag: {
type: String,
index: true,
},
user: {
type: String,
index: true,
},
description: {
type: String,
index: true,
},
count: {
type: Number,
default: 0,
},
position: {
type: Number,
default: 0,
index: true,
},
},
{ timestamps: true },
);
conversationTagSchema.index({ tag: 1, user: 1 }, { unique: true });
module.exports = mongoose.model('ConversationTag', conversationTagSchema);
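The compound unique index lets a tag name repeat across users but not within one user's tags; the duplicate insert below fails at the database level:
// Sketch: the third create violates the { tag: 1, user: 1 } unique index.
async function demoTagUniqueness(ConversationTag) {
  await ConversationTag.create({ tag: 'work', user: 'user-1' });
  await ConversationTag.create({ tag: 'work', user: 'user-2' }); // ok: different user
  await ConversationTag.create({ tag: 'work', user: 'user-1' }); // throws E11000 duplicate key
}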

View File

@@ -1,18 +1,63 @@
const mongoose = require('mongoose');
const mongoMeili = require('../plugins/mongoMeili');
const { convoSchema } = require('@librechat/data-schemas');
const { conversationPreset } = require('./defaults');
const convoSchema = mongoose.Schema(
{
conversationId: {
type: String,
unique: true,
required: true,
index: true,
meiliIndex: true,
},
title: {
type: String,
default: 'New Chat',
meiliIndex: true,
},
user: {
type: String,
index: true,
},
messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
// google only
examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
agentOptions: {
type: mongoose.Schema.Types.Mixed,
},
...conversationPreset,
agent_id: {
type: String,
},
tags: {
type: [String],
default: [],
meiliIndex: true,
},
files: {
type: [String],
},
expiredAt: {
type: Date,
},
},
{ timestamps: true },
);
if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
convoSchema.plugin(mongoMeili, {
host: process.env.MEILI_HOST,
apiKey: process.env.MEILI_MASTER_KEY,
/** Note: Will get created automatically if it doesn't exist already */
indexName: 'convos',
indexName: 'convos', // Will get created automatically if it doesn't exist already
primaryKey: 'conversationId',
});
}
// Create TTL index
convoSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });
convoSchema.index({ createdAt: 1, updatedAt: 1 });
convoSchema.index({ conversationId: 1, user: 1 }, { unique: true });
const Conversation = mongoose.models.Conversation || mongoose.model('Conversation', convoSchema);
module.exports = Conversation;
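Because the TTL index uses expireAfterSeconds: 0 on expiredAt, deletion is scheduled per document: MongoDB removes a conversation once its expiredAt timestamp passes, and documents with expiredAt unset never expire. A sketch of scheduling expiry (the 30-day window is illustrative):
// Mark a conversation to be purged by the TTL monitor ~30 days from now.
async function scheduleConversationExpiry(conversationId, user) {
  const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000;
  await Conversation.updateOne(
    { conversationId, user },
    { $set: { expiredAt: new Date(Date.now() + THIRTY_DAYS_MS) } },
  );
}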

View File

@@ -1,7 +1,4 @@
import { Schema } from 'mongoose';
// @ts-ignore
export const conversationPreset = {
const conversationPreset = {
// endpoint: [azureOpenAI, openAI, anthropic, chatGPTBrowser]
endpoint: {
type: String,
@@ -27,7 +24,6 @@ export const conversationPreset = {
required: false,
},
// for google only
examples: { type: [{ type: Schema.Types.Mixed }], default: undefined },
modelLabel: {
type: String,
required: false,
@@ -57,10 +53,6 @@ export const conversationPreset = {
type: Number,
required: false,
},
maxTokens: {
type: Number,
required: false,
},
presence_penalty: {
type: Number,
required: false,
@@ -78,12 +70,6 @@ export const conversationPreset = {
promptCache: {
type: Boolean,
},
thinking: {
type: Boolean,
},
thinkingBudget: {
type: Number,
},
system: {
type: String,
},
@@ -136,3 +122,57 @@ export const conversationPreset = {
type: String,
},
};
const agentOptions = {
model: {
type: String,
required: false,
},
// for azureOpenAI, openAI only
chatGptLabel: {
type: String,
required: false,
},
modelLabel: {
type: String,
required: false,
},
promptPrefix: {
type: String,
required: false,
},
temperature: {
type: Number,
required: false,
},
top_p: {
type: Number,
required: false,
},
// for google only
topP: {
type: Number,
required: false,
},
topK: {
type: Number,
required: false,
},
maxOutputTokens: {
type: Number,
required: false,
},
presence_penalty: {
type: Number,
required: false,
},
frequency_penalty: {
type: Number,
required: false,
},
};
module.exports = {
conversationPreset,
agentOptions,
};

View File

@@ -0,0 +1,111 @@
const { FileSources } = require('librechat-data-provider');
const mongoose = require('mongoose');
/**
* @typedef {Object} MongoFile
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {number} [__v] - MongoDB Version Key
* @property {ObjectId} user - User ID
* @property {string} [conversationId] - Optional conversation ID
* @property {string} file_id - File identifier
* @property {string} [temp_file_id] - Temporary File identifier
* @property {number} bytes - Size of the file in bytes
* @property {string} filename - Name of the file
* @property {string} filepath - Location of the file
* @property {'file'} object - Type of object, always 'file'
* @property {string} type - Type of file
* @property {number} [usage=0] - Number of uses of the file
* @property {string} [context] - Context of the file origin
* @property {boolean} [embedded=false] - Whether or not the file is embedded in vector db
* @property {string} [model] - The model to identify the group region of the file (for Azure OpenAI hosting)
* @property {string} [source] - The source of the file (e.g., from FileSources)
* @property {number} [width] - Optional width of the file
* @property {number} [height] - Optional height of the file
* @property {Object} [metadata] - Metadata related to the file
* @property {string} [metadata.fileIdentifier] - Unique identifier for the file in metadata
* @property {Date} [expiresAt] - Optional expiration date of the file
* @property {Date} [createdAt] - Date when the file was created
* @property {Date} [updatedAt] - Date when the file was updated
*/
/** @type {MongooseSchema<MongoFile>} */
const fileSchema = mongoose.Schema(
{
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
index: true,
required: true,
},
conversationId: {
type: String,
ref: 'Conversation',
index: true,
},
file_id: {
type: String,
// required: true,
index: true,
},
temp_file_id: {
type: String,
// required: true,
},
bytes: {
type: Number,
required: true,
},
filename: {
type: String,
required: true,
},
filepath: {
type: String,
required: true,
},
object: {
type: String,
required: true,
default: 'file',
},
embedded: {
type: Boolean,
},
type: {
type: String,
required: true,
},
context: {
type: String,
// required: true,
},
usage: {
type: Number,
required: true,
default: 0,
},
source: {
type: String,
default: FileSources.local,
},
model: {
type: String,
},
width: Number,
height: Number,
metadata: {
fileIdentifier: String,
},
expiresAt: {
type: Date,
expires: 3600, // 1 hour in seconds
},
},
{
timestamps: true,
},
);
fileSchema.index({ createdAt: 1, updatedAt: 1 });
module.exports = fileSchema;
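
On the `expiresAt` field above, Mongoose's `expires: 3600` shorthand builds a TTL index equivalent to `fileSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 3600 })`: a document is removed one hour after its `expiresAt` value, not at it. A sketch of how a temporary upload could opt in (the require path and helper name are hypothetical):

```js
const mongoose = require('mongoose');
const fileSchema = require('./file'); // hypothetical path to this schema

const File = mongoose.models.File || mongoose.model('File', fileSchema);

// Stamping `expiresAt: new Date()` starts the one-hour countdown now;
// documents without `expiresAt` are never touched by the TTL monitor.
async function createTempUpload(data) {
  return File.create({ ...data, expiresAt: new Date() });
}
```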

View File

@@ -1,13 +1,6 @@
import mongoose, { Schema, Document, Types } from 'mongoose';
const mongoose = require('mongoose');
export interface IKey extends Document {
userId: Types.ObjectId;
name: string;
value: string;
expiresAt?: Date;
}
const keySchema: Schema<IKey> = new Schema({
const keySchema = mongoose.Schema({
userId: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
@@ -28,4 +21,4 @@ const keySchema: Schema<IKey> = new Schema({
keySchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 });
export default keySchema;
module.exports = keySchema;

View File

@@ -1,6 +1,145 @@
const mongoose = require('mongoose');
const mongoMeili = require('~/models/plugins/mongoMeili');
const { messageSchema } = require('@librechat/data-schemas');
const messageSchema = mongoose.Schema(
{
messageId: {
type: String,
unique: true,
required: true,
index: true,
meiliIndex: true,
},
conversationId: {
type: String,
index: true,
required: true,
meiliIndex: true,
},
user: {
type: String,
index: true,
required: true,
default: null,
},
model: {
type: String,
default: null,
},
endpoint: {
type: String,
},
conversationSignature: {
type: String,
},
clientId: {
type: String,
},
invocationId: {
type: Number,
},
parentMessageId: {
type: String,
},
tokenCount: {
type: Number,
},
summaryTokenCount: {
type: Number,
},
sender: {
type: String,
meiliIndex: true,
},
text: {
type: String,
meiliIndex: true,
},
summary: {
type: String,
},
isCreatedByUser: {
type: Boolean,
required: true,
default: false,
},
unfinished: {
type: Boolean,
default: false,
},
error: {
type: Boolean,
default: false,
},
finish_reason: {
type: String,
},
_meiliIndex: {
type: Boolean,
required: false,
select: false,
default: false,
},
files: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
plugin: {
type: {
latest: {
type: String,
required: false,
},
inputs: {
type: [mongoose.Schema.Types.Mixed],
required: false,
default: undefined,
},
outputs: {
type: String,
required: false,
},
},
default: undefined,
},
plugins: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
content: {
type: [{ type: mongoose.Schema.Types.Mixed }],
default: undefined,
meiliIndex: true,
},
thread_id: {
type: String,
},
/* frontend components */
iconURL: {
type: String,
},
attachments: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
/*
attachments: {
type: [
{
file_id: String,
filename: String,
filepath: String,
expiresAt: Date,
width: Number,
height: Number,
type: String,
conversationId: String,
messageId: {
type: String,
required: true,
},
toolCallId: String,
},
],
default: undefined,
},
*/
expiredAt: {
type: Date,
},
},
{ timestamps: true },
);
if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
messageSchema.plugin(mongoMeili, {
@@ -10,7 +149,11 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
primaryKey: 'messageId',
});
}
messageSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });
messageSchema.index({ createdAt: 1 });
messageSchema.index({ messageId: 1, user: 1 }, { unique: true });
/** @type {mongoose.Model<TMessage>} */
const Message = mongoose.models.Message || mongoose.model('Message', messageSchema);
module.exports = Message;
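
The compound unique index on `{ messageId, user }` added above means a write keyed by both fields can be an idempotent upsert: concurrent saves of the same message for the same user collapse into one document instead of raising duplicate-key errors. A sketch under that assumption (the path and helper are hypothetical):

```js
const Message = require('./Message'); // hypothetical path

// Upsert keyed by the compound unique index; `new: true` returns the
// post-update document rather than the pre-update one.
async function saveMessage(user, message) {
  return Message.findOneAndUpdate(
    { messageId: message.messageId, user },
    { $set: message },
    { upsert: true, new: true },
  );
}
```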

View File

@@ -1,5 +1,25 @@
const mongoose = require('mongoose');
const { pluginAuthSchema } = require('@librechat/data-schemas');
const pluginAuthSchema = mongoose.Schema(
{
authField: {
type: String,
required: true,
},
value: {
type: String,
required: true,
},
userId: {
type: String,
required: true,
},
pluginKey: {
type: String,
},
},
{ timestamps: true },
);
const PluginAuth = mongoose.models.Plugin || mongoose.model('PluginAuth', pluginAuthSchema);

View File

@@ -1,5 +1,38 @@
const mongoose = require('mongoose');
const { presetSchema } = require('@librechat/data-schemas');
const { conversationPreset } = require('./defaults');
const presetSchema = mongoose.Schema(
{
presetId: {
type: String,
unique: true,
required: true,
index: true,
},
title: {
type: String,
default: 'New Chat',
meiliIndex: true,
},
user: {
type: String,
default: null,
},
defaultPreset: {
type: Boolean,
},
order: {
type: Number,
},
// google only
examples: [{ type: mongoose.Schema.Types.Mixed }],
...conversationPreset,
agentOptions: {
type: mongoose.Schema.Types.Mixed,
default: null,
},
},
{ timestamps: true },
);
const Preset = mongoose.models.Preset || mongoose.model('Preset', presetSchema);

View File

@@ -0,0 +1,35 @@
const { Schema } = require('mongoose');
/**
* @typedef {Object} MongoProject
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} name - The name of the project
* @property {ObjectId[]} promptGroupIds - Array of PromptGroup IDs associated with the project
* @property {Date} [createdAt] - Date when the project was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the project was last updated (added by timestamps)
*/
const projectSchema = new Schema(
{
name: {
type: String,
required: true,
index: true,
},
promptGroupIds: {
type: [Schema.Types.ObjectId],
ref: 'PromptGroup',
default: [],
},
agentIds: {
type: [String],
ref: 'Agent',
default: [],
},
},
{
timestamps: true,
},
);
module.exports = projectSchema;

View File

@@ -0,0 +1,118 @@
const mongoose = require('mongoose');
const { Constants } = require('librechat-data-provider');
const Schema = mongoose.Schema;
/**
* @typedef {Object} MongoPromptGroup
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} name - The name of the prompt group
* @property {ObjectId} author - The author of the prompt group
* @property {ObjectId} [projectId=null] - The project ID of the prompt group
 * @property {ObjectId} [productionId=null] - The ID of the production prompt for the group
* @property {string} authorName - The name of the author of the prompt group
* @property {number} [numberOfGenerations=0] - Number of generations the prompt group has
* @property {string} [oneliner=''] - Oneliner description of the prompt group
* @property {string} [category=''] - Category of the prompt group
* @property {string} [command] - Command for the prompt group
* @property {Date} [createdAt] - Date when the prompt group was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the prompt group was last updated (added by timestamps)
*/
const promptGroupSchema = new Schema(
{
name: {
type: String,
required: true,
index: true,
},
numberOfGenerations: {
type: Number,
default: 0,
},
oneliner: {
type: String,
default: '',
},
category: {
type: String,
default: '',
index: true,
},
projectIds: {
type: [Schema.Types.ObjectId],
ref: 'Project',
index: true,
},
productionId: {
type: Schema.Types.ObjectId,
ref: 'Prompt',
required: true,
index: true,
},
author: {
type: Schema.Types.ObjectId,
ref: 'User',
required: true,
index: true,
},
authorName: {
type: String,
required: true,
},
command: {
type: String,
index: true,
validate: {
validator: function (v) {
return v === undefined || v === null || v === '' || /^[a-z0-9-]+$/.test(v);
},
message: (props) =>
        `${props.value} is not a valid command. Only lowercase alphanumeric characters and hyphens (-) are allowed.`,
},
maxlength: [
Constants.COMMANDS_MAX_LENGTH,
`Command cannot be longer than ${Constants.COMMANDS_MAX_LENGTH} characters`,
],
},
},
{
timestamps: true,
},
);
const PromptGroup = mongoose.model('PromptGroup', promptGroupSchema);
const promptSchema = new Schema(
{
groupId: {
type: Schema.Types.ObjectId,
ref: 'PromptGroup',
required: true,
index: true,
},
author: {
type: Schema.Types.ObjectId,
ref: 'User',
required: true,
},
prompt: {
type: String,
required: true,
},
type: {
type: String,
enum: ['text', 'chat'],
required: true,
},
},
{
timestamps: true,
},
);
const Prompt = mongoose.model('Prompt', promptSchema);
promptSchema.index({ createdAt: 1, updatedAt: 1 });
promptGroupSchema.index({ createdAt: 1, updatedAt: 1 });
module.exports = { Prompt, PromptGroup };
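
The `command` validator above accepts only lowercase alphanumerics and hyphens, while treating an unset or empty command as valid. A quick illustration of the same predicate in isolation:

```js
// Mirrors the validator: empty/undefined pass through, otherwise the
// value must match /^[a-z0-9-]+$/.
const isValidCommand = (v) =>
  v === undefined || v === null || v === '' || /^[a-z0-9-]+$/.test(v);

console.log(isValidCommand('my-prompt-2')); // true
console.log(isValidCommand('My_Prompt'));   // false (uppercase, underscore)
console.log(isValidCommand(''));            // true (no command set)
```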

View File

@@ -0,0 +1,55 @@
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const mongoose = require('mongoose');
const roleSchema = new mongoose.Schema({
name: {
type: String,
required: true,
unique: true,
index: true,
},
[PermissionTypes.BOOKMARKS]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.PROMPTS]: {
[Permissions.SHARED_GLOBAL]: {
type: Boolean,
default: false,
},
[Permissions.USE]: {
type: Boolean,
default: true,
},
[Permissions.CREATE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.AGENTS]: {
[Permissions.SHARED_GLOBAL]: {
type: Boolean,
default: false,
},
[Permissions.USE]: {
type: Boolean,
default: true,
},
[Permissions.CREATE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.MULTI_CONVO]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
});
const Role = mongoose.model('Role', roleSchema);
module.exports = Role;
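
Since the role schema keys its permission groups with computed property names from `librechat-data-provider`, a permission check reads directly off the document using the same constants. A minimal sketch against the `Role` model exported above (the require path is hypothetical):

```js
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const Role = require('./Role'); // hypothetical path

// Returns whether the named role may create prompts; a missing role or
// missing permission field falls back to false.
async function canCreatePrompts(roleName) {
  const role = await Role.findOne({ name: roleName }).lean();
  return Boolean(role?.[PermissionTypes.PROMPTS]?.[Permissions.CREATE]);
}
```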

View File

@@ -0,0 +1,20 @@
const mongoose = require('mongoose');
const sessionSchema = mongoose.Schema({
refreshTokenHash: {
type: String,
required: true,
},
expiration: {
type: Date,
required: true,
expires: 0,
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
});
module.exports = sessionSchema;

View File

@@ -1,17 +1,6 @@
import mongoose, { Schema, Document, Types } from 'mongoose';
const mongoose = require('mongoose');
export interface ISharedLink extends Document {
conversationId: string;
title?: string;
user?: string;
messages?: Types.ObjectId[];
shareId?: string;
isPublic: boolean;
createdAt?: Date;
updatedAt?: Date;
}
const shareSchema: Schema<ISharedLink> = new Schema(
const shareSchema = mongoose.Schema(
{
conversationId: {
type: String,
@@ -38,4 +27,4 @@ const shareSchema: Schema<ISharedLink> = new Schema(
{ timestamps: true },
);
export default shareSchema;
module.exports = mongoose.model('SharedLink', shareSchema);

View File

@@ -1,17 +1,7 @@
import { Schema, Document, Types } from 'mongoose';
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
export interface IToken extends Document {
userId: Types.ObjectId;
email?: string;
type?: string;
identifier?: string;
token: string;
createdAt: Date;
expiresAt: Date;
metadata?: Map<string, unknown>;
}
const tokenSchema: Schema<IToken> = new Schema({
const tokenSchema = new Schema({
userId: {
type: Schema.Types.ObjectId,
required: true,
@@ -20,9 +10,7 @@ const tokenSchema: Schema<IToken> = new Schema({
email: {
type: String,
},
type: {
type: String,
},
type: String,
identifier: {
type: String,
},
@@ -47,4 +35,4 @@ const tokenSchema: Schema<IToken> = new Schema({
tokenSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 });
export default tokenSchema;
module.exports = tokenSchema;

View File

@@ -0,0 +1,54 @@
const mongoose = require('mongoose');
/**
* @typedef {Object} ToolCallData
* @property {string} conversationId - The ID of the conversation
* @property {string} messageId - The ID of the message
* @property {string} toolId - The ID of the tool
* @property {string | ObjectId} user - The user's ObjectId
* @property {unknown} [result] - Optional result data
* @property {TAttachment[]} [attachments] - Optional attachments data
* @property {number} [blockIndex] - Optional code block index
* @property {number} [partIndex] - Optional part index
*/
/** @type {MongooseSchema<ToolCallData>} */
const toolCallSchema = mongoose.Schema(
{
conversationId: {
type: String,
required: true,
},
messageId: {
type: String,
required: true,
},
toolId: {
type: String,
required: true,
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
result: {
type: mongoose.Schema.Types.Mixed,
},
attachments: {
type: mongoose.Schema.Types.Mixed,
},
blockIndex: {
type: Number,
},
partIndex: {
type: Number,
},
},
{ timestamps: true },
);
toolCallSchema.index({ messageId: 1, user: 1 });
toolCallSchema.index({ conversationId: 1, user: 1 });
module.exports = mongoose.model('ToolCall', toolCallSchema);

View File

@@ -1,24 +1,6 @@
import mongoose, { Schema, Document, Types } from 'mongoose';
const mongoose = require('mongoose');
// @ts-ignore
export interface ITransaction extends Document {
user: Types.ObjectId;
conversationId?: string;
tokenType: 'prompt' | 'completion' | 'credits';
model?: string;
context?: string;
valueKey?: string;
rate?: number;
rawAmount?: number;
tokenValue?: number;
inputTokens?: number;
writeTokens?: number;
readTokens?: number;
createdAt?: Date;
updatedAt?: Date;
}
const transactionSchema: Schema<ITransaction> = new Schema(
const transactionSchema = mongoose.Schema(
{
user: {
type: mongoose.Schema.Types.ObjectId,
@@ -57,4 +39,4 @@ const transactionSchema: Schema<ITransaction> = new Schema(
},
);
export default transactionSchema;
module.exports = transactionSchema;

View File

@@ -0,0 +1,162 @@
const mongoose = require('mongoose');
const { SystemRoles } = require('librechat-data-provider');
/**
* @typedef {Object} MongoSession
* @property {string} [refreshToken] - The refresh token
*/
/**
* @typedef {Object} MongoUser
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} [name] - The user's name
* @property {string} [username] - The user's username, in lowercase
* @property {string} email - The user's email address
* @property {boolean} emailVerified - Whether the user's email is verified
* @property {string} [password] - The user's password, trimmed with 8-128 characters
* @property {string} [avatar] - The URL of the user's avatar
* @property {string} provider - The provider of the user's account (e.g., 'local', 'google')
* @property {string} [role='USER'] - The role of the user
* @property {string} [googleId] - Optional Google ID for the user
* @property {string} [facebookId] - Optional Facebook ID for the user
* @property {string} [openidId] - Optional OpenID ID for the user
* @property {string} [ldapId] - Optional LDAP ID for the user
* @property {string} [githubId] - Optional GitHub ID for the user
* @property {string} [discordId] - Optional Discord ID for the user
* @property {string} [appleId] - Optional Apple ID for the user
* @property {Array} [plugins=[]] - List of plugins used by the user
* @property {Array.<MongoSession>} [refreshToken] - List of sessions with refresh tokens
* @property {Date} [expiresAt] - Optional expiration date of the file
* @property {Date} [createdAt] - Date when the user was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the user was last updated (added by timestamps)
*/
/** @type {MongooseSchema<MongoSession>} */
const Session = mongoose.Schema({
refreshToken: {
type: String,
default: '',
},
});
const backupCodeSchema = mongoose.Schema({
codeHash: { type: String, required: true },
used: { type: Boolean, default: false },
usedAt: { type: Date, default: null },
});
const passkeySchema = mongoose.Schema({
id: { type: String, required: true },
publicKey: { type: Buffer, required: true },
counter: { type: Number, default: 0 },
transports: { type: [String], default: [] },
});
/** @type {MongooseSchema<MongoUser>} */
const userSchema = mongoose.Schema(
{
name: {
type: String,
},
username: {
type: String,
lowercase: true,
default: '',
},
email: {
type: String,
required: [true, 'can\'t be blank'],
lowercase: true,
unique: true,
match: [/\S+@\S+\.\S+/, 'is invalid'],
index: true,
},
emailVerified: {
type: Boolean,
required: true,
default: false,
},
password: {
type: String,
trim: true,
minlength: 8,
maxlength: 128,
},
avatar: {
type: String,
required: false,
},
provider: {
type: String,
required: true,
default: 'local',
},
role: {
type: String,
default: SystemRoles.USER,
},
googleId: {
type: String,
unique: true,
sparse: true,
},
facebookId: {
type: String,
unique: true,
sparse: true,
},
openidId: {
type: String,
unique: true,
sparse: true,
},
ldapId: {
type: String,
unique: true,
sparse: true,
},
githubId: {
type: String,
unique: true,
sparse: true,
},
discordId: {
type: String,
unique: true,
sparse: true,
},
appleId: {
type: String,
unique: true,
sparse: true,
},
passkeys: {
type: [passkeySchema],
default: [],
},
plugins: {
type: Array,
},
totpSecret: {
type: String,
},
backupCodes: {
type: [backupCodeSchema],
},
refreshToken: {
type: [Session],
},
expiresAt: {
type: Date,
expires: 604800, // 7 days in seconds
},
termsAccepted: {
type: Boolean,
default: false,
},
},
{ timestamps: true },
);
module.exports = userSchema;
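
For the new `passkeys` array: each entry stores the credential ID, COSE public key bytes, signature counter, and transports from a WebAuthn registration. A hypothetical sketch of mapping a verification result onto those fields; the `registrationInfo` property names follow the @simplewebauthn convention and are an assumption, not necessarily the exact shape `passport-simple-webauthn2` returns:

```js
// Hypothetical mapper from a WebAuthn registration result to passkeySchema.
function toPasskey(registrationInfo) {
  return {
    id: registrationInfo.credentialID,                            // credential identifier
    publicKey: Buffer.from(registrationInfo.credentialPublicKey), // COSE key bytes
    counter: registrationInfo.counter ?? 0,                       // signature counter
    transports: registrationInfo.transports || [],                // e.g. ['internal', 'usb']
  };
}
```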

View File

@@ -36,7 +36,7 @@ const spendTokens = async (txData, tokenUsage) => {
prompt = await Transaction.create({
...txData,
tokenType: 'prompt',
rawAmount: promptTokens === 0 ? 0 : -Math.max(promptTokens, 0),
rawAmount: -Math.max(promptTokens, 0),
});
}
@@ -44,7 +44,7 @@ const spendTokens = async (txData, tokenUsage) => {
completion = await Transaction.create({
...txData,
tokenType: 'completion',
rawAmount: completionTokens === 0 ? 0 : -Math.max(completionTokens, 0),
rawAmount: -Math.max(completionTokens, 0),
});
}
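
Dropping the explicit zero guard changes the stored amount for empty token counts from `0` to `-0`, since `-Math.max(0, 0)` negates a zero. The two values compare equal under `===` but differ under `Object.is`, which is why the spec below updates its expectation. A quick illustration:

```js
console.log(-Math.max(100, 0));              // -100
console.log(-Math.max(-5, 0));               // -0 (negatives clamp to 0 first)
console.log(-0 === 0);                       // true: strict equality ignores sign
console.log(Object.is(-Math.max(0, 0), -0)); // true: Object.is does not
```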

View File

@@ -1,10 +1,17 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
// Mock the logger to prevent console output during tests
jest.mock('./Transaction', () => ({
Transaction: {
create: jest.fn(),
createStructured: jest.fn(),
},
}));
jest.mock('./Balance', () => ({
findOne: jest.fn(),
findOneAndUpdate: jest.fn(),
}));
jest.mock('~/config', () => ({
logger: {
debug: jest.fn(),
@@ -12,46 +19,19 @@ jest.mock('~/config', () => ({
},
}));
// Mock the Config service
const { getBalanceConfig } = require('~/server/services/Config');
jest.mock('~/server/services/Config');
// Import after mocking
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
describe('spendTokens', () => {
let mongoServer;
let userId;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
// Clear collections before each test
await Transaction.deleteMany({});
await Balance.deleteMany({});
// Create a new user ID for each test
userId = new mongoose.Types.ObjectId();
// Mock the balance config to be enabled by default
getBalanceConfig.mockResolvedValue({ enabled: true });
beforeEach(() => {
jest.clearAllMocks();
process.env.CHECK_BALANCE = 'true';
});
it('should create transactions for both prompt and completion tokens', async () => {
// Create a balance for the user
await Balance.create({
user: userId,
tokenCredits: 10000,
});
const txData = {
user: userId,
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
@@ -61,35 +41,31 @@ describe('spendTokens', () => {
completionTokens: 50,
};
Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -50 });
Balance.findOne.mockResolvedValue({ tokenCredits: 10000 });
Balance.findOneAndUpdate.mockResolvedValue({ tokenCredits: 9850 });
await spendTokens(txData, tokenUsage);
// Verify transactions were created
const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
expect(transactions).toHaveLength(2);
// Check completion transaction
expect(transactions[0].tokenType).toBe('completion');
expect(transactions[0].rawAmount).toBe(-50);
// Check prompt transaction
expect(transactions[1].tokenType).toBe('prompt');
expect(transactions[1].rawAmount).toBe(-100);
// Verify balance was updated
const balance = await Balance.findOne({ user: userId });
expect(balance).toBeDefined();
expect(balance.tokenCredits).toBeLessThan(10000); // Balance should be reduced
expect(Transaction.create).toHaveBeenCalledTimes(2);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'prompt',
rawAmount: -100,
}),
);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'completion',
rawAmount: -50,
}),
);
});
it('should handle zero completion tokens', async () => {
// Create a balance for the user
await Balance.create({
user: userId,
tokenCredits: 10000,
});
const txData = {
user: userId,
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
@@ -99,26 +75,31 @@ describe('spendTokens', () => {
completionTokens: 0,
};
Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -0 });
Balance.findOne.mockResolvedValue({ tokenCredits: 10000 });
Balance.findOneAndUpdate.mockResolvedValue({ tokenCredits: 9850 });
await spendTokens(txData, tokenUsage);
// Verify transactions were created
const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
expect(transactions).toHaveLength(2);
// Check completion transaction
expect(transactions[0].tokenType).toBe('completion');
// In JavaScript -0 and 0 are different but functionally equivalent
// Use Math.abs to handle both 0 and -0
expect(Math.abs(transactions[0].rawAmount)).toBe(0);
// Check prompt transaction
expect(transactions[1].tokenType).toBe('prompt');
expect(transactions[1].rawAmount).toBe(-100);
expect(Transaction.create).toHaveBeenCalledTimes(2);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'prompt',
rawAmount: -100,
}),
);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'completion',
rawAmount: -0, // Changed from 0 to -0
}),
);
});
it('should handle undefined token counts', async () => {
const txData = {
user: userId,
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
@@ -127,22 +108,13 @@ describe('spendTokens', () => {
await spendTokens(txData, tokenUsage);
// Verify no transactions were created
const transactions = await Transaction.find({ user: userId });
expect(transactions).toHaveLength(0);
expect(Transaction.create).not.toHaveBeenCalled();
});
it('should not update balance when the balance feature is disabled', async () => {
// Override configuration: disable balance updates
getBalanceConfig.mockResolvedValue({ enabled: false });
// Create a balance for the user
await Balance.create({
user: userId,
tokenCredits: 10000,
});
it('should not update balance when CHECK_BALANCE is false', async () => {
process.env.CHECK_BALANCE = 'false';
const txData = {
user: userId,
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
@@ -152,529 +124,19 @@ describe('spendTokens', () => {
completionTokens: 50,
};
await spendTokens(txData, tokenUsage);
// Verify transactions were created
const transactions = await Transaction.find({ user: userId });
expect(transactions).toHaveLength(2);
// Verify balance was not updated (should still be 10000)
const balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(10000);
});
it('should not allow balance to go below zero when spending tokens', async () => {
// Create a balance with a low amount
await Balance.create({
user: userId,
tokenCredits: 5000,
});
const txData = {
user: userId,
conversationId: 'test-convo',
model: 'gpt-4', // Using a more expensive model
context: 'test',
};
// Spending more tokens than the user has balance for
const tokenUsage = {
promptTokens: 1000,
completionTokens: 500,
};
Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -50 });
await spendTokens(txData, tokenUsage);
// Verify transactions were created
const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
expect(transactions).toHaveLength(2);
// Verify balance was reduced to exactly 0, not negative
const balance = await Balance.findOne({ user: userId });
expect(balance).toBeDefined();
expect(balance.tokenCredits).toBe(0);
// Check that the transaction records show the adjusted values
const transactionResults = await Promise.all(
transactions.map((t) =>
Transaction.create({
...txData,
tokenType: t.tokenType,
rawAmount: t.rawAmount,
}),
),
);
// The second transaction should have an adjusted value since balance is already 0
expect(transactionResults[1]).toEqual(
expect.objectContaining({
balance: 0,
}),
);
});
it('should handle multiple transactions in sequence with low balance and not increase balance', async () => {
// This test is specifically checking for the issue reported in production
// where the balance increases after a transaction when it should remain at 0
// Create a balance with a very low amount
await Balance.create({
user: userId,
tokenCredits: 100,
});
// First transaction - should reduce balance to 0
const txData1 = {
user: userId,
conversationId: 'test-convo-1',
model: 'gpt-4',
context: 'test',
};
const tokenUsage1 = {
promptTokens: 100,
completionTokens: 50,
};
await spendTokens(txData1, tokenUsage1);
// Check balance after first transaction
let balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(0);
// Second transaction - should keep balance at 0, not make it negative or increase it
const txData2 = {
user: userId,
conversationId: 'test-convo-2',
model: 'gpt-4',
context: 'test',
};
const tokenUsage2 = {
promptTokens: 200,
completionTokens: 100,
};
await spendTokens(txData2, tokenUsage2);
// Check balance after second transaction - should still be 0
balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(0);
// Verify all transactions were created
const transactions = await Transaction.find({ user: userId });
expect(transactions).toHaveLength(4); // 2 transactions (prompt+completion) for each call
// Let's examine the actual transaction records to see what's happening
const transactionDetails = await Transaction.find({ user: userId }).sort({ createdAt: 1 });
// Log the transaction details for debugging
console.log('Transaction details:');
transactionDetails.forEach((tx, i) => {
console.log(`Transaction ${i + 1}:`, {
tokenType: tx.tokenType,
rawAmount: tx.rawAmount,
tokenValue: tx.tokenValue,
model: tx.model,
});
});
// Check the return values from Transaction.create directly
// This is to verify that the incrementValue is not becoming positive
const directResult = await Transaction.create({
user: userId,
conversationId: 'test-convo-3',
model: 'gpt-4',
tokenType: 'completion',
rawAmount: -100,
context: 'test',
});
console.log('Direct Transaction.create result:', directResult);
// The completion value should never be positive
expect(directResult.completion).not.toBeGreaterThan(0);
});
it('should ensure tokenValue is always negative for spending tokens', async () => {
// Create a balance for the user
await Balance.create({
user: userId,
tokenCredits: 10000,
});
// Test with various models to check multiplier calculations
const models = ['gpt-3.5-turbo', 'gpt-4', 'claude-3-5-sonnet'];
for (const model of models) {
const txData = {
user: userId,
conversationId: `test-convo-${model}`,
model,
context: 'test',
};
const tokenUsage = {
promptTokens: 100,
completionTokens: 50,
};
await spendTokens(txData, tokenUsage);
// Get the transactions for this model
const transactions = await Transaction.find({
user: userId,
model,
});
// Verify tokenValue is negative for all transactions
transactions.forEach((tx) => {
console.log(`Model ${model}, Type ${tx.tokenType}: tokenValue = ${tx.tokenValue}`);
expect(tx.tokenValue).toBeLessThan(0);
});
}
});
it('should handle structured transactions in sequence with low balance', async () => {
// Create a balance with a very low amount
await Balance.create({
user: userId,
tokenCredits: 100,
});
// First transaction - should reduce balance to 0
const txData1 = {
user: userId,
conversationId: 'test-convo-1',
model: 'claude-3-5-sonnet',
context: 'test',
};
const tokenUsage1 = {
promptTokens: {
input: 10,
write: 100,
read: 5,
},
completionTokens: 50,
};
await spendStructuredTokens(txData1, tokenUsage1);
// Check balance after first transaction
let balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(0);
// Second transaction - should keep balance at 0, not make it negative or increase it
const txData2 = {
user: userId,
conversationId: 'test-convo-2',
model: 'claude-3-5-sonnet',
context: 'test',
};
const tokenUsage2 = {
promptTokens: {
input: 20,
write: 200,
read: 10,
},
completionTokens: 100,
};
await spendStructuredTokens(txData2, tokenUsage2);
// Check balance after second transaction - should still be 0
balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(0);
// Verify all transactions were created
const transactions = await Transaction.find({ user: userId });
expect(transactions).toHaveLength(4); // 2 transactions (prompt+completion) for each call
// Let's examine the actual transaction records to see what's happening
const transactionDetails = await Transaction.find({ user: userId }).sort({ createdAt: 1 });
// Log the transaction details for debugging
console.log('Structured transaction details:');
transactionDetails.forEach((tx, i) => {
console.log(`Transaction ${i + 1}:`, {
tokenType: tx.tokenType,
rawAmount: tx.rawAmount,
tokenValue: tx.tokenValue,
inputTokens: tx.inputTokens,
writeTokens: tx.writeTokens,
readTokens: tx.readTokens,
model: tx.model,
});
});
});
it('should not allow balance to go below zero when spending structured tokens', async () => {
// Create a balance with a low amount
await Balance.create({
user: userId,
tokenCredits: 5000,
});
const txData = {
user: userId,
conversationId: 'test-convo',
model: 'claude-3-5-sonnet', // Using a model that supports structured tokens
context: 'test',
};
// Spending more tokens than the user has balance for
const tokenUsage = {
promptTokens: {
input: 100,
write: 1000,
read: 50,
},
completionTokens: 500,
};
const result = await spendStructuredTokens(txData, tokenUsage);
// Verify transactions were created
const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
expect(transactions).toHaveLength(2);
// Verify balance was reduced to exactly 0, not negative
const balance = await Balance.findOne({ user: userId });
expect(balance).toBeDefined();
expect(balance.tokenCredits).toBe(0);
// The result should show the adjusted values
expect(result).toEqual({
prompt: expect.objectContaining({
user: userId.toString(),
balance: expect.any(Number),
}),
completion: expect.objectContaining({
user: userId.toString(),
balance: 0, // Final balance should be 0
}),
});
});
it('should handle multiple concurrent transactions correctly with a high balance', async () => {
// Create a balance with a high amount
const initialBalance = 10000000;
await Balance.create({
user: userId,
tokenCredits: initialBalance,
});
// Simulate the recordCollectedUsage function from the production code
const conversationId = 'test-concurrent-convo';
const context = 'message';
const model = 'gpt-4';
const amount = 50;
// Create `amount` of usage records to simulate multiple transactions
const collectedUsage = Array.from({ length: amount }, (_, i) => ({
model,
input_tokens: 100 + i * 10, // Increasing input tokens
output_tokens: 50 + i * 5, // Increasing output tokens
input_token_details: {
cache_creation: i % 2 === 0 ? 20 : 0, // Some have cache creation
cache_read: i % 3 === 0 ? 10 : 0, // Some have cache read
},
}));
// Process all transactions concurrently to simulate race conditions
const promises = [];
let expectedTotalSpend = 0;
for (let i = 0; i < collectedUsage.length; i++) {
const usage = collectedUsage[i];
if (!usage) {
continue;
}
const cache_creation = Number(usage.input_token_details?.cache_creation) || 0;
const cache_read = Number(usage.input_token_details?.cache_read) || 0;
const txMetadata = {
context,
conversationId,
user: userId,
model: usage.model,
};
// Calculate expected spend for this transaction
const promptTokens = usage.input_tokens;
const completionTokens = usage.output_tokens;
// For regular transactions
if (cache_creation === 0 && cache_read === 0) {
// Add to expected spend using the correct multipliers from tx.js
// For gpt-4, the multipliers are: prompt=30, completion=60
expectedTotalSpend += promptTokens * 30; // gpt-4 prompt rate is 30
expectedTotalSpend += completionTokens * 60; // gpt-4 completion rate is 60
promises.push(
spendTokens(txMetadata, {
promptTokens,
completionTokens,
}),
);
} else {
// For structured transactions with cache operations
// The multipliers for claude models with cache operations are different
// But since we're using gpt-4 in the test, we need to use appropriate values
expectedTotalSpend += promptTokens * 30; // Base prompt rate for gpt-4
// Since gpt-4 doesn't have cache multipliers defined, we'll use the prompt rate
expectedTotalSpend += cache_creation * 30; // Write rate (using prompt rate as fallback)
expectedTotalSpend += cache_read * 30; // Read rate (using prompt rate as fallback)
expectedTotalSpend += completionTokens * 60; // Completion rate for gpt-4
promises.push(
spendStructuredTokens(txMetadata, {
promptTokens: {
input: promptTokens,
write: cache_creation,
read: cache_read,
},
completionTokens,
}),
);
}
}
// Wait for all transactions to complete
await Promise.all(promises);
// Verify final balance
const finalBalance = await Balance.findOne({ user: userId });
expect(finalBalance).toBeDefined();
// The final balance should be the initial balance minus the expected total spend
const expectedFinalBalance = initialBalance - expectedTotalSpend;
console.log('Initial balance:', initialBalance);
console.log('Expected total spend:', expectedTotalSpend);
console.log('Expected final balance:', expectedFinalBalance);
console.log('Actual final balance:', finalBalance.tokenCredits);
// Allow for small rounding differences
expect(finalBalance.tokenCredits).toBeCloseTo(expectedFinalBalance, 0);
// Verify all transactions were created
const transactions = await Transaction.find({
user: userId,
conversationId,
});
// We should have 2 transactions (prompt + completion) for each usage record
// Some might be structured, some regular
expect(transactions.length).toBeGreaterThanOrEqual(collectedUsage.length);
// Log transaction details for debugging
console.log('Transaction summary:');
let totalTokenValue = 0;
transactions.forEach((tx) => {
console.log(`${tx.tokenType}: rawAmount=${tx.rawAmount}, tokenValue=${tx.tokenValue}`);
totalTokenValue += tx.tokenValue;
});
console.log('Total token value from transactions:', totalTokenValue);
// The difference between expected and actual is significant
// This is likely due to the multipliers being different in the test environment
// Let's adjust our expectation based on the actual transactions
const actualSpend = initialBalance - finalBalance.tokenCredits;
console.log('Actual spend:', actualSpend);
// Instead of checking the exact balance, let's verify that:
// 1. The balance was reduced (tokens were spent)
expect(finalBalance.tokenCredits).toBeLessThan(initialBalance);
// 2. The total token value from transactions matches the actual spend
expect(Math.abs(totalTokenValue)).toBeCloseTo(actualSpend, -3); // Allow for larger differences
});
// Add this new test case
it('should handle multiple concurrent balance increases correctly', async () => {
// Start with zero balance
const initialBalance = 0;
await Balance.create({
user: userId,
tokenCredits: initialBalance,
});
const numberOfRefills = 25;
const refillAmount = 1000;
const promises = [];
for (let i = 0; i < numberOfRefills; i++) {
promises.push(
Transaction.createAutoRefillTransaction({
user: userId,
tokenType: 'credits',
context: 'concurrent-refill-test',
rawAmount: refillAmount,
}),
);
}
// Wait for all refill transactions to complete
const results = await Promise.all(promises);
// Verify final balance
const finalBalance = await Balance.findOne({ user: userId });
expect(finalBalance).toBeDefined();
// The final balance should be the initial balance plus the sum of all refills
const expectedFinalBalance = initialBalance + numberOfRefills * refillAmount;
console.log('Initial balance (Increase Test):', initialBalance);
console.log(`Performed ${numberOfRefills} refills of ${refillAmount} each.`);
console.log('Expected final balance (Increase Test):', expectedFinalBalance);
console.log('Actual final balance (Increase Test):', finalBalance.tokenCredits);
// Use toBeCloseTo for safety, though toBe should work for integer math
expect(finalBalance.tokenCredits).toBeCloseTo(expectedFinalBalance, 0);
// Verify all transactions were created
const transactions = await Transaction.find({
user: userId,
context: 'concurrent-refill-test',
});
// We should have one transaction for each refill attempt
expect(transactions.length).toBe(numberOfRefills);
// Optional: Verify the sum of increments from the results matches the balance change
const totalIncrementReported = results.reduce((sum, result) => {
// Assuming createAutoRefillTransaction returns an object with the increment amount
// Adjust this based on the actual return structure.
// Let's assume it returns { balance: newBalance, transaction: { rawAmount: ... } }
// Or perhaps we check the transaction.rawAmount directly
return sum + (result?.transaction?.rawAmount || 0);
}, 0);
console.log('Total increment reported by results:', totalIncrementReported);
expect(totalIncrementReported).toBe(expectedFinalBalance - initialBalance);
// Optional: Check the sum of tokenValue from saved transactions
let totalTokenValueFromDb = 0;
transactions.forEach((tx) => {
// For refills, rawAmount is positive, and tokenValue might be calculated based on it
// Let's assume tokenValue directly reflects the increment for simplicity here
// If calculation is involved, adjust accordingly
totalTokenValueFromDb += tx.rawAmount; // Or tx.tokenValue if that holds the increment
});
console.log('Total rawAmount from DB transactions:', totalTokenValueFromDb);
expect(totalTokenValueFromDb).toBeCloseTo(expectedFinalBalance - initialBalance, 0);
expect(Transaction.create).toHaveBeenCalledTimes(2);
expect(Balance.findOne).not.toHaveBeenCalled();
expect(Balance.findOneAndUpdate).not.toHaveBeenCalled();
});
it('should create structured transactions for both prompt and completion tokens', async () => {
// Create a balance for the user
await Balance.create({
user: userId,
tokenCredits: 10000,
});
const txData = {
user: userId,
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'claude-3-5-sonnet',
context: 'test',
@@ -688,37 +150,48 @@ describe('spendTokens', () => {
completionTokens: 50,
};
const result = await spendStructuredTokens(txData, tokenUsage);
// Verify transactions were created
const transactions = await Transaction.find({ user: userId }).sort({ tokenType: 1 });
expect(transactions).toHaveLength(2);
// Check completion transaction
expect(transactions[0].tokenType).toBe('completion');
expect(transactions[0].rawAmount).toBe(-50);
// Check prompt transaction
expect(transactions[1].tokenType).toBe('prompt');
expect(transactions[1].inputTokens).toBe(-10);
expect(transactions[1].writeTokens).toBe(-100);
expect(transactions[1].readTokens).toBe(-5);
// Verify result contains transaction info
expect(result).toEqual({
prompt: expect.objectContaining({
user: userId.toString(),
prompt: expect.any(Number),
}),
completion: expect.objectContaining({
user: userId.toString(),
completion: expect.any(Number),
}),
Transaction.createStructured.mockResolvedValueOnce({
rate: 3.75,
user: txData.user.toString(),
balance: 9570,
prompt: -430,
});
Transaction.create.mockResolvedValueOnce({
rate: 15,
user: txData.user.toString(),
balance: 8820,
completion: -750,
});
// Verify balance was updated
const balance = await Balance.findOne({ user: userId });
expect(balance).toBeDefined();
expect(balance.tokenCredits).toBeLessThan(10000); // Balance should be reduced
const result = await spendStructuredTokens(txData, tokenUsage);
expect(Transaction.createStructured).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'prompt',
inputTokens: -10,
writeTokens: -100,
readTokens: -5,
}),
);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'completion',
rawAmount: -50,
}),
);
expect(result).toEqual({
prompt: expect.objectContaining({
rate: 3.75,
user: txData.user.toString(),
balance: 9570,
prompt: -430,
}),
completion: expect.objectContaining({
rate: 15,
user: txData.user.toString(),
balance: 8820,
completion: -750,
}),
});
});
});
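
The rewrite above replaces the in-memory MongoDB server with pure `jest.mock` stubs. One detail the "Import after mocking" comment relies on: Jest hoists `jest.mock(...)` calls above `require`, so modules imported afterwards already receive the mocked implementations. A minimal sketch of the pattern:

```js
// jest.mock is hoisted above the require, so this import sees the mock.
jest.mock('./Transaction', () => ({ Transaction: { create: jest.fn() } }));
const { Transaction } = require('./Transaction');

test('Transaction.create is the mock', () => {
  expect(jest.isMockFunction(Transaction.create)).toBe(true);
});
```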

View File

@@ -61,7 +61,6 @@ const bedrockValues = {
'amazon.nova-micro-v1:0': { prompt: 0.035, completion: 0.14 },
'amazon.nova-lite-v1:0': { prompt: 0.06, completion: 0.24 },
'amazon.nova-pro-v1:0': { prompt: 0.8, completion: 3.2 },
'deepseek.r1': { prompt: 1.35, completion: 5.4 },
};
/**
@@ -80,7 +79,6 @@ const tokenValues = Object.assign(
'o1-mini': { prompt: 1.1, completion: 4.4 },
'o1-preview': { prompt: 15, completion: 60 },
o1: { prompt: 15, completion: 60 },
'gpt-4.5': { prompt: 75, completion: 150 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-4o': { prompt: 2.5, completion: 10 },
'gpt-4o-2024-05-13': { prompt: 5, completion: 15 },
@@ -90,8 +88,6 @@ const tokenValues = Object.assign(
'claude-3-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-sonnet': { prompt: 3, completion: 15 },
'claude-3.5-sonnet': { prompt: 3, completion: 15 },
'claude-3-7-sonnet': { prompt: 3, completion: 15 },
'claude-3.7-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
@@ -109,26 +105,11 @@ const tokenValues = Object.assign(
'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 },
'gemini-2.0-flash': { prompt: 0.1, completion: 0.7 },
'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time
'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
'gemini-1.5': { prompt: 2.5, completion: 10 },
'gemini-pro-vision': { prompt: 0.5, completion: 1.5 },
gemini: { prompt: 0.5, completion: 1.5 },
'grok-2-vision-1212': { prompt: 2.0, completion: 10.0 },
'grok-2-vision-latest': { prompt: 2.0, completion: 10.0 },
'grok-2-vision': { prompt: 2.0, completion: 10.0 },
'grok-vision-beta': { prompt: 5.0, completion: 15.0 },
'grok-2-1212': { prompt: 2.0, completion: 10.0 },
'grok-2-latest': { prompt: 2.0, completion: 10.0 },
'grok-2': { prompt: 2.0, completion: 10.0 },
'grok-beta': { prompt: 5.0, completion: 15.0 },
'mistral-large': { prompt: 2.0, completion: 6.0 },
'pixtral-large': { prompt: 2.0, completion: 6.0 },
'mistral-saba': { prompt: 0.2, completion: 0.6 },
codestral: { prompt: 0.3, completion: 0.9 },
'ministral-8b': { prompt: 0.1, completion: 0.1 },
'ministral-3b': { prompt: 0.04, completion: 0.04 },
},
bedrockValues,
);
@@ -140,8 +121,6 @@ const tokenValues = Object.assign(
* @type {Object.<string, {write: number, read: number }>}
*/
const cacheTokenValues = {
'claude-3.7-sonnet': { write: 3.75, read: 0.3 },
'claude-3-7-sonnet': { write: 3.75, read: 0.3 },
'claude-3.5-sonnet': { write: 3.75, read: 0.3 },
'claude-3-5-sonnet': { write: 3.75, read: 0.3 },
'claude-3.5-haiku': { write: 1, read: 0.08 },
@@ -176,8 +155,6 @@ const getValueKey = (model, endpoint) => {
return 'o1-mini';
} else if (modelName.includes('o1')) {
return 'o1';
} else if (modelName.includes('gpt-4.5')) {
return 'gpt-4.5';
} else if (modelName.includes('gpt-4o-2024-05-13')) {
return 'gpt-4o-2024-05-13';
} else if (modelName.includes('gpt-4o-mini')) {

View File

@@ -50,16 +50,6 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4-0125')).toBe('gpt-4-1106');
});
it('should return "gpt-4.5" for model type of "gpt-4.5"', () => {
expect(getValueKey('gpt-4.5-preview')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-2024-08-06')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-2024-08-06-0718')).toBe('gpt-4.5');
expect(getValueKey('openai/gpt-4.5')).toBe('gpt-4.5');
expect(getValueKey('openai/gpt-4.5-2024-08-06')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-turbo')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-0125')).toBe('gpt-4.5');
});
it('should return "gpt-4o" for model type of "gpt-4o"', () => {
expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');
@@ -90,20 +80,6 @@ describe('getValueKey', () => {
expect(getValueKey('chatgpt-4o-latest-0718')).toBe('gpt-4o');
});
it('should return "claude-3-7-sonnet" for model type of "claude-3-7-sonnet-"', () => {
expect(getValueKey('claude-3-7-sonnet-20240620')).toBe('claude-3-7-sonnet');
expect(getValueKey('anthropic/claude-3-7-sonnet')).toBe('claude-3-7-sonnet');
expect(getValueKey('claude-3-7-sonnet-turbo')).toBe('claude-3-7-sonnet');
expect(getValueKey('claude-3-7-sonnet-0125')).toBe('claude-3-7-sonnet');
});
it('should return "claude-3.7-sonnet" for model type of "claude-3.7-sonnet-"', () => {
expect(getValueKey('claude-3.7-sonnet-20240620')).toBe('claude-3.7-sonnet');
expect(getValueKey('anthropic/claude-3.7-sonnet')).toBe('claude-3.7-sonnet');
expect(getValueKey('claude-3.7-sonnet-turbo')).toBe('claude-3.7-sonnet');
expect(getValueKey('claude-3.7-sonnet-0125')).toBe('claude-3.7-sonnet');
});
it('should return "claude-3-5-sonnet" for model type of "claude-3-5-sonnet-"', () => {
expect(getValueKey('claude-3-5-sonnet-20240620')).toBe('claude-3-5-sonnet');
expect(getValueKey('anthropic/claude-3-5-sonnet')).toBe('claude-3-5-sonnet');
@@ -288,7 +264,7 @@ describe('AWS Bedrock Model Tests', () => {
});
describe('Deepseek Model Tests', () => {
const deepseekModels = ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner', 'deepseek.r1'];
const deepseekModels = ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner'];
it('should return the correct prompt multipliers for all models', () => {
const results = deepseekModels.map((model) => {
@@ -482,30 +458,3 @@ describe('Google Model Tests', () => {
});
});
});
describe('Grok Model Tests - Pricing', () => {
describe('getMultiplier', () => {
test('should return correct prompt and completion rates for Grok vision models', () => {
const models = ['grok-2-vision-1212', 'grok-2-vision', 'grok-2-vision-latest'];
models.forEach((model) => {
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
});
});
test('should return correct prompt and completion rates for Grok text models', () => {
const models = ['grok-2-1212', 'grok-2', 'grok-2-latest'];
models.forEach((model) => {
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
});
});
test('should return correct prompt and completion rates for Grok beta models', () => {
expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe(5.0);
expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe(15.0);
expect(getMultiplier({ model: 'grok-beta', tokenType: 'prompt' })).toBe(5.0);
expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe(15.0);
});
});
});

View File

@@ -1,6 +1,6 @@
const bcrypt = require('bcryptjs');
const { getBalanceConfig } = require('~/server/services/Config');
const signPayload = require('~/server/services/signPayload');
const { isEnabled } = require('~/server/utils/handleText');
const Balance = require('./Balance');
const User = require('./User');
@@ -13,9 +13,11 @@ const User = require('./User');
*/
const getUserById = async function (userId, fieldsToSelect = null) {
const query = User.findById(userId);
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
};
@@ -30,6 +32,7 @@ const findUser = async function (searchCriteria, fieldsToSelect = null) {
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
};
@@ -55,12 +58,11 @@ const updateUser = async function (userId, updateData) {
* Creates a new user, optionally with a TTL of 1 week.
* @param {MongoUser} data - The user data to be created, must contain user_id.
* @param {boolean} [disableTTL=true] - Whether to disable the TTL. Defaults to `true`.
* @param {boolean} [returnUser=false] - Whether to return the created user object.
* @returns {Promise<ObjectId|MongoUser>} A promise that resolves to the created user document ID or user object.
* @param {boolean} [returnUser=false] - Whether to disable the TTL. Defaults to `true`.
* @returns {Promise<ObjectId>} A promise that resolves to the created user document ID.
* @throws {Error} If a user with the same user_id already exists.
*/
const createUser = async (data, disableTTL = true, returnUser = false) => {
const balance = await getBalanceConfig();
const userData = {
...data,
expiresAt: disableTTL ? null : new Date(Date.now() + 604800 * 1000), // 1 week in milliseconds
@@ -72,27 +74,13 @@ const createUser = async (data, disableTTL = true, returnUser = false) => {
const user = await User.create(userData);
// If balance is enabled, create or update a balance record for the user using global.interfaceConfig.balance
if (balance?.enabled && balance?.startBalance) {
const update = {
$inc: { tokenCredits: balance.startBalance },
};
if (
balance.autoRefillEnabled &&
balance.refillIntervalValue != null &&
balance.refillIntervalUnit != null &&
balance.refillAmount != null
) {
update.$set = {
autoRefillEnabled: true,
refillIntervalValue: balance.refillIntervalValue,
refillIntervalUnit: balance.refillIntervalUnit,
refillAmount: balance.refillAmount,
};
}
await Balance.findOneAndUpdate({ user: user._id }, update, { upsert: true, new: true }).lean();
if (isEnabled(process.env.CHECK_BALANCE) && process.env.START_BALANCE) {
let incrementValue = parseInt(process.env.START_BALANCE);
await Balance.findOneAndUpdate(
{ user: user._id },
{ $inc: { tokenCredits: incrementValue } },
{ upsert: true, new: true },
).lean();
}
if (returnUser) {
@@ -135,7 +123,7 @@ const expires = eval(SESSION_EXPIRY) ?? 1000 * 60 * 15;
/**
* Generates a JWT token for a given user.
*
* @param {MongoUser} user - The user for whom the token is being generated.
* @param {MongoUser} user - ID of the user for whom the token is being generated.
* @returns {Promise<string>} A promise that resolves to a JWT token.
*/
const generateToken = async (user) => {
@@ -158,7 +146,7 @@ const generateToken = async (user) => {
/**
* Compares the provided password with the user's password.
*
* @param {MongoUser} user - The user to compare the password for.
* @param {MongoUser} user - the user to compare password for.
* @param {string} candidatePassword - The password to test against the user's password.
* @returns {Promise<boolean>} A promise that resolves to a boolean indicating if the password matches.
*/

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.7.7",
"version": "v0.7.7-rc1",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -34,25 +34,20 @@
},
"homepage": "https://librechat.ai",
"dependencies": {
"@anthropic-ai/sdk": "^0.37.0",
"@aws-sdk/client-s3": "^3.758.0",
"@aws-sdk/s3-request-presigner": "^3.758.0",
"@azure/identity": "^4.7.0",
"@anthropic-ai/sdk": "^0.32.1",
"@azure/search-documents": "^12.0.0",
"@azure/storage-blob": "^12.26.0",
"@google/generative-ai": "^0.23.0",
"@google/generative-ai": "^0.21.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/mongo": "^2.1.8",
"@keyv/redis": "^2.8.1",
"@langchain/community": "^0.3.34",
"@langchain/core": "^0.3.40",
"@langchain/google-genai": "^0.1.11",
"@langchain/google-vertexai": "^0.2.2",
"@langchain/community": "^0.3.14",
"@langchain/core": "^0.3.37",
"@langchain/google-genai": "^0.1.7",
"@langchain/google-vertexai": "^0.1.8",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.3.95",
"@librechat/data-schemas": "*",
"@librechat/agents": "^2.1.2",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "^1.8.2",
"axios": "1.7.8",
"bcryptjs": "^2.4.3",
"cohere-ai": "^7.9.1",
"compression": "^1.7.4",
@@ -62,12 +57,10 @@
"cors": "^2.8.5",
"dedent": "^1.5.3",
"dotenv": "^16.0.3",
"eventsource": "^3.0.2",
"express": "^4.21.2",
"express-mongo-sanitize": "^2.2.0",
"express-rate-limit": "^7.4.1",
"express-session": "^1.18.1",
"express-static-gzip": "^2.2.0",
"file-type": "^18.7.0",
"firebase": "^11.0.2",
"googleapis": "^126.0.1",
@@ -79,6 +72,7 @@
"keyv": "^4.5.4",
"keyv-file": "^0.2.0",
"klona": "^2.0.6",
"langchain": "^0.2.19",
"librechat-data-provider": "*",
"librechat-mcp": "*",
"lodash": "^4.17.21",
@@ -86,7 +80,7 @@
"memorystore": "^1.6.7",
"mime": "^3.0.0",
"module-alias": "^2.2.3",
"mongoose": "^8.12.1",
"mongoose": "^8.9.5",
"multer": "^1.4.5-lts.1",
"nanoid": "^3.3.7",
"nodemailer": "^6.9.15",
@@ -103,8 +97,8 @@
"passport-jwt": "^4.0.1",
"passport-ldapauth": "^3.0.1",
"passport-local": "^1.0.0",
"rate-limit-redis": "^4.2.0",
"sharp": "^0.33.5",
"passport-simple-webauthn2": "^3.2.0",
"sharp": "^0.32.6",
"tiktoken": "^1.0.15",
"traverse": "^0.6.7",
"ua-parser-js": "^1.0.36",

View File

@@ -1,7 +1,6 @@
const { CacheKeys } = require('librechat-data-provider');
const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');
/**
* @param {ServerRequest} req
@@ -37,13 +36,8 @@ async function loadModels(req) {
}
async function modelController(req, res) {
try {
const modelConfig = await loadModels(req);
res.send(modelConfig);
} catch (error) {
logger.error('Error fetching models:', error);
res.status(500).send({ error: error.message });
}
const modelConfig = await loadModels(req);
res.send(modelConfig);
}
module.exports = { modelController, loadModels, getModelsConfig };

View File

@@ -1,81 +1,66 @@
const {
generateTOTPSecret,
generateBackupCodes,
verifyTOTP,
verifyBackupCode,
generateTOTPSecret,
generateBackupCodes,
getTOTPSecret,
} = require('~/server/services/twoFactorService');
const { updateUser, getUserById } = require('~/models');
const { logger } = require('~/config');
const { encryptV3 } = require('~/server/utils/crypto');
const { encryptV2 } = require('~/server/utils/crypto');
const safeAppTitle = (process.env.APP_TITLE || 'LibreChat').replace(/\s+/g, '');
const enable2FAController = async (req, res) => {
const safeAppTitle = (process.env.APP_TITLE || 'LibreChat').replace(/\s+/g, '');
/**
* Enable 2FA for the user by generating a new TOTP secret and backup codes.
* The secret is encrypted and stored, and 2FA is marked as disabled until confirmed.
*/
const enable2FA = async (req, res) => {
try {
const userId = req.user.id;
const secret = generateTOTPSecret();
const { plainCodes, codeObjects } = await generateBackupCodes();
// Encrypt the secret with v3 encryption before saving.
const encryptedSecret = encryptV3(secret);
// Update the user record: store the secret & backup codes and set twoFactorEnabled to false.
const user = await updateUser(userId, {
totpSecret: encryptedSecret,
backupCodes: codeObjects,
twoFactorEnabled: false,
});
const encryptedSecret = await encryptV2(secret);
const user = await updateUser(userId, { totpSecret: encryptedSecret, backupCodes: codeObjects });
const otpauthUrl = `otpauth://totp/${safeAppTitle}:${user.email}?secret=${secret}&issuer=${safeAppTitle}`;
return res.status(200).json({ otpauthUrl, backupCodes: plainCodes });
res.status(200).json({
otpauthUrl,
backupCodes: plainCodes,
});
} catch (err) {
logger.error('[enable2FA]', err);
return res.status(500).json({ message: err.message });
logger.error('[enable2FAController]', err);
res.status(500).json({ message: err.message });
}
};
/**
* Verify a 2FA code (either TOTP or backup code) during setup.
*/
const verify2FA = async (req, res) => {
const verify2FAController = async (req, res) => {
try {
const userId = req.user.id;
const { token, backupCode } = req.body;
const user = await getUserById(userId);
if (!user || !user.totpSecret) {
return res.status(400).json({ message: '2FA not initiated' });
}
// Retrieve the plain TOTP secret using getTOTPSecret.
const secret = await getTOTPSecret(user.totpSecret);
let isVerified = false;
if (token) {
isVerified = await verifyTOTP(secret, token);
} else if (backupCode) {
isVerified = await verifyBackupCode({ user, backupCode });
}
if (isVerified) {
if (token && (await verifyTOTP(secret, token))) {
return res.status(200).json();
} else if (backupCode) {
const verified = await verifyBackupCode({ user, backupCode });
if (verified) {
return res.status(200).json();
}
}
return res.status(400).json({ message: 'Invalid token or backup code.' });
return res.status(400).json({ message: 'Invalid token.' });
} catch (err) {
logger.error('[verify2FA]', err);
return res.status(500).json({ message: err.message });
logger.error('[verify2FAController]', err);
res.status(500).json({ message: err.message });
}
};
/**
* Confirm and enable 2FA after a successful verification.
*/
const confirm2FA = async (req, res) => {
const confirm2FAController = async (req, res) => {
try {
const userId = req.user.id;
const { token } = req.body;
@@ -85,54 +70,50 @@ const confirm2FA = async (req, res) => {
return res.status(400).json({ message: '2FA not initiated' });
}
// Retrieve the plain TOTP secret using getTOTPSecret.
const secret = await getTOTPSecret(user.totpSecret);
if (await verifyTOTP(secret, token)) {
await updateUser(userId, { twoFactorEnabled: true });
return res.status(200).json();
}
return res.status(400).json({ message: 'Invalid token.' });
} catch (err) {
logger.error('[confirm2FA]', err);
return res.status(500).json({ message: err.message });
logger.error('[confirm2FAController]', err);
res.status(500).json({ message: err.message });
}
};
/**
* Disable 2FA by clearing the stored secret and backup codes.
*/
const disable2FA = async (req, res) => {
const disable2FAController = async (req, res) => {
try {
const userId = req.user.id;
await updateUser(userId, { totpSecret: null, backupCodes: [], twoFactorEnabled: false });
return res.status(200).json();
await updateUser(userId, { totpSecret: null, backupCodes: [] });
res.status(200).json();
} catch (err) {
logger.error('[disable2FA]', err);
return res.status(500).json({ message: err.message });
logger.error('[disable2FAController]', err);
res.status(500).json({ message: err.message });
}
};
/**
* Regenerate backup codes for the user.
*/
const regenerateBackupCodes = async (req, res) => {
const regenerateBackupCodesController = async (req, res) => {
try {
const userId = req.user.id;
const { plainCodes, codeObjects } = await generateBackupCodes();
await updateUser(userId, { backupCodes: codeObjects });
return res.status(200).json({
res.status(200).json({
backupCodes: plainCodes,
backupCodesHash: codeObjects,
});
} catch (err) {
logger.error('[regenerateBackupCodes]', err);
return res.status(500).json({ message: err.message });
logger.error('[regenerateBackupCodesController]', err);
res.status(500).json({ message: err.message });
}
};
module.exports = {
enable2FA,
verify2FA,
confirm2FA,
disable2FA,
regenerateBackupCodes,
enable2FAController,
verify2FAController,
confirm2FAController,
disable2FAController,
regenerateBackupCodesController,
};
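The twoFactorService functions these controllers call (generateTOTPSecret, verifyTOTP, getTOTPSecret, generateBackupCodes) are not part of this diff. As a rough illustration of the TOTP half of the flow only, here is the equivalent using the otplib package; treat it as an assumption about behavior, not the service's implementation.

// Illustrative TOTP flow with otplib; LibreChat's twoFactorService may differ.
const { authenticator } = require('otplib');

const secret = authenticator.generateSecret(); // base32, like `secret` in the otpauth URL above

// The otpauth URL encodes issuer and account so authenticator apps can enroll it.
const appTitle = 'LibreChat';
const otpauthUrl = `otpauth://totp/${appTitle}:user@example.com?secret=${secret}&issuer=${appTitle}`;

// A submitted six-digit token is valid when it matches the current 30-second window.
const token = authenticator.generate(secret); // what the user's authenticator app would show
console.log(otpauthUrl, authenticator.verify({ token, secret })); // -> true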

View File

@@ -1,8 +1,6 @@
const { FileSources } = require('librechat-data-provider');
const {
Balance,
getFiles,
updateUser,
deleteFiles,
deleteConvos,
deletePresets,
@@ -14,7 +12,6 @@ const User = require('~/models/User');
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud');
const { processDeleteRequest } = require('~/server/services/Files/process');
const { deleteAllSharedLinks } = require('~/models/Share');
const { deleteToolCalls } = require('~/models/ToolCall');
@@ -22,23 +19,8 @@ const { Transaction } = require('~/models/Transaction');
const { logger } = require('~/config');
const getUserController = async (req, res) => {
/** @type {MongoUser} */
const userData = req.user.toObject != null ? req.user.toObject() : { ...req.user };
delete userData.totpSecret;
if (req.app.locals.fileStrategy === FileSources.s3 && userData.avatar) {
const avatarNeedsRefresh = needsRefresh(userData.avatar, 3600);
if (!avatarNeedsRefresh) {
return res.status(200).send(userData);
}
const originalAvatar = userData.avatar;
try {
userData.avatar = await getNewS3URL(userData.avatar);
await updateUser(userData.id, { avatar: userData.avatar });
} catch (error) {
userData.avatar = originalAvatar;
logger.error('Error getting new S3 URL for avatar:', error);
}
}
res.status(200).send(userData);
};
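The avatar branch in this hunk re-signs an S3 URL when it is close to expiry (needsRefresh with a 3600-second window). Since those helpers are not shown here, this is a minimal sketch of re-signing with the AWS SDK v3; the bucket env var and the key derivation are assumptions.

// Hedged sketch of refreshing an expiring presigned S3 avatar URL.
const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3');
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');

const client = new S3Client({ region: process.env.AWS_REGION });
const Bucket = process.env.AWS_BUCKET_NAME; // assumption: bucket name from env

async function getNewS3URL(oldUrl) {
  // assumption: the object key is the URL path, minus the old signing query string
  const Key = decodeURIComponent(new URL(oldUrl).pathname.slice(1));
  return getSignedUrl(client, new GetObjectCommand({ Bucket, Key }), {
    expiresIn: 3600, // matches the 3600-second window checked by needsRefresh
  });
}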

View File

@@ -1,5 +1,4 @@
const { nanoid } = require('nanoid');
const { Tools, StepTypes, FileContext } = require('librechat-data-provider');
const { Tools, StepTypes, imageGenTools, FileContext } = require('librechat-data-provider');
const {
EnvVar,
Providers,
@@ -10,8 +9,8 @@ const {
ChatModelStreamHandler,
} = require('@librechat/agents');
const { processCodeOutput } = require('~/server/services/Files/Code/process');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { saveBase64Image } = require('~/server/services/Files/process');
const { loadAuthValues } = require('~/app/clients/tools/util');
const { logger, sendEvent } = require('~/config');
/** @typedef {import('@librechat/agents').Graph} Graph */
@@ -243,6 +242,32 @@ function createToolEndCallback({ req, res, artifactPromises }) {
return;
}
if (imageGenTools.has(output.name)) {
artifactPromises.push(
(async () => {
const fileMetadata = Object.assign(output.artifact, {
messageId: metadata.run_id,
toolCallId: output.tool_call_id,
conversationId: metadata.thread_id,
});
if (!res.headersSent) {
return fileMetadata;
}
if (!fileMetadata) {
return null;
}
res.write(`event: attachment\ndata: ${JSON.stringify(fileMetadata)}\n\n`);
return fileMetadata;
})().catch((error) => {
logger.error('Error processing code output:', error);
return null;
}),
);
return;
}
if (output.artifact.content) {
/** @type {FormattedContent[]} */
const content = output.artifact.content;
@@ -253,7 +278,7 @@ function createToolEndCallback({ req, res, artifactPromises }) {
const { url } = part.image_url;
artifactPromises.push(
(async () => {
const filename = `${output.name}_${output.tool_call_id}_img_${nanoid()}`;
const filename = `${output.tool_call_id}-image-${new Date().getTime()}`;
const file = await saveBase64Image(url, {
req,
filename,
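The diff is truncated here by the viewer, but the res.write call in the imageGenTools block above already shows the transport: each attachment is pushed to the client with server-sent-event framing as soon as its metadata resolves. For reference, the wire format is just an event line, a data line, and a blank line; this helper is illustrative, as the diff writes the framing inline.

// Minimal SSE framing, as emitted by the `event: attachment` write above.
function writeAttachment(res, fileMetadata) {
  res.write(`event: attachment\ndata: ${JSON.stringify(fileMetadata)}\n\n`);
}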

Some files were not shown because too many files have changed in this diff.