Compare commits

14 Commits: feat/group...dev-exampl

| Author | SHA1 | Date |
|---|---|---|
|  | 7251308244 |  |
|  | 799f0e5810 |  |
|  | cbda3cb529 |  |
|  | 3ab1bd65e5 |  |
|  | c551ba21f5 |  |
|  | c87422a1e0 |  |
|  | b169306096 |  |
|  | 42977ac0d0 |  |
|  | d9a0fe03ed |  |
|  | d39b99971f |  |
|  | 1b7e044bf5 |  |
|  | 5c947be455 |  |
|  | 2b2f7fe289 |  |
|  | a058963a9f |  |
**.env.example** — the example environment file drops `gptPlugins` from the suggested `ENDPOINTS` list and refreshes the Google model defaults:

```diff
@@ -58,7 +58,7 @@ DEBUG_CONSOLE=false
 # Endpoints #
 #===================================================#

-# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic
+# ENDPOINTS=openAI,assistants,azureOpenAI,google,anthropic

 PROXY=

@@ -142,10 +142,10 @@ GOOGLE_KEY=user_provided
 # GOOGLE_AUTH_HEADER=true

 # Gemini API (AI Studio)
-# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002
+# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite-preview-06-17,gemini-2.0-flash,gemini-2.0-flash-lite

 # Vertex AI
-# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002
+# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite-preview-06-17,gemini-2.0-flash-001,gemini-2.0-flash-lite-001

 # GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001

@@ -657,4 +657,4 @@ OPENWEATHER_API_KEY=
 # Reranker (Required)
 # JINA_API_KEY=your_jina_api_key
 # or
-# COHERE_API_KEY=your_cohere_api_key
+# COHERE_API_KEY=your_cohere_api_key
```
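For orientation, a user opts into endpoints by uncommenting that variable in their own `.env`. A minimal sketch using the new suggested set from the diff:

```bash
# .env (user copy): enable only the endpoints you need;
# gptPlugins is no longer part of the suggested list.
ENDPOINTS=openAI,assistants,azureOpenAI,google,anthropic
```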
**ChatGPTClient.js** — removed entirely (`@@ -1,804 +0,0 @@`). The deleted legacy client:

```js
const { Keyv } = require('keyv');
const crypto = require('crypto');
const { CohereClient } = require('cohere-ai');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { constructAzureURL, genAzureChatCompletion } = require('@librechat/api');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
  ImageDetail,
  EModelEndpoint,
  resolveHeaders,
  CohereConstants,
  mapModelToAzureConfig,
} = require('librechat-data-provider');
const { createContextHandlers } = require('./prompts');
const { createCoherePayload } = require('./llm');
const { extractBaseURL } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

const CHATGPT_MODEL = 'gpt-3.5-turbo';
const tokenizersCache = {};

class ChatGPTClient extends BaseClient {
  constructor(apiKey, options = {}, cacheOptions = {}) {
    super(apiKey, options, cacheOptions);

    cacheOptions.namespace = cacheOptions.namespace || 'chatgpt';
    this.conversationsCache = new Keyv(cacheOptions);
    this.setOptions(options);
  }

  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      // nested options aren't spread properly, so we need to do this manually
      this.options.modelOptions = {
        ...this.options.modelOptions,
        ...options.modelOptions,
      };
      delete options.modelOptions;
      // now we can merge options
      this.options = {
        ...this.options,
        ...options,
      };
    } else {
      this.options = options;
    }

    if (this.options.openaiApiKey) {
      this.apiKey = this.options.openaiApiKey;
    }

    const modelOptions = this.options.modelOptions || {};
    this.modelOptions = {
      ...modelOptions,
      // set some good defaults (check for undefined in some cases because they may be 0)
      model: modelOptions.model || CHATGPT_MODEL,
      temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
      top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
      presence_penalty:
        typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
      stop: modelOptions.stop,
    };

    this.isChatGptModel = this.modelOptions.model.includes('gpt-');
    const { isChatGptModel } = this;
    this.isUnofficialChatGptModel =
      this.modelOptions.model.startsWith('text-chat') ||
      this.modelOptions.model.startsWith('text-davinci-002-render');
    const { isUnofficialChatGptModel } = this;

    // Davinci models have a max context length of 4097 tokens.
    this.maxContextTokens = this.options.maxContextTokens || (isChatGptModel ? 4095 : 4097);
    // I decided to reserve 1024 tokens for the response.
    // The max prompt tokens is determined by the max context tokens minus the max response tokens.
    // Earlier messages will be dropped until the prompt is within the limit.
    this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
    this.maxPromptTokens =
      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

    if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
      throw new Error(
        `maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
          this.maxPromptTokens + this.maxResponseTokens
        }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
      );
    }

    this.userLabel = this.options.userLabel || 'User';
    this.chatGptLabel = this.options.chatGptLabel || 'ChatGPT';

    if (isChatGptModel) {
      // Use these faux tokens to help the AI understand the context since we are building the chat log ourselves.
      // Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason,
      // without tripping the stop sequences, so I'm using "||>" instead.
      this.startToken = '||>';
      this.endToken = '';
      this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
    } else if (isUnofficialChatGptModel) {
      this.startToken = '<|im_start|>';
      this.endToken = '<|im_end|>';
      this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, {
        '<|im_start|>': 100264,
        '<|im_end|>': 100265,
      });
    } else {
      // Previously I was trying to use "<|endoftext|>" but there seems to be some bug with OpenAI's token counting
      // system that causes only the first "<|endoftext|>" to be counted as 1 token, and the rest are not treated
      // as a single token. So we're using this instead.
      this.startToken = '||>';
      this.endToken = '';
      try {
        this.gptEncoder = this.constructor.getTokenizer(this.modelOptions.model, true);
      } catch {
        this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true);
      }
    }

    if (!this.modelOptions.stop) {
      const stopTokens = [this.startToken];
      if (this.endToken && this.endToken !== this.startToken) {
        stopTokens.push(this.endToken);
      }
      stopTokens.push(`\n${this.userLabel}:`);
      stopTokens.push('<|diff_marker|>');
      // I chose not to do one for `chatGptLabel` because I've never seen it happen
      this.modelOptions.stop = stopTokens;
    }

    if (this.options.reverseProxyUrl) {
      this.completionsUrl = this.options.reverseProxyUrl;
    } else if (isChatGptModel) {
      this.completionsUrl = 'https://api.openai.com/v1/chat/completions';
    } else {
      this.completionsUrl = 'https://api.openai.com/v1/completions';
    }

    return this;
  }

  static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
    if (tokenizersCache[encoding]) {
      return tokenizersCache[encoding];
    }
    let tokenizer;
    if (isModelName) {
      tokenizer = encodingForModel(encoding, extendSpecialTokens);
    } else {
      tokenizer = getEncoding(encoding, extendSpecialTokens);
    }
    tokenizersCache[encoding] = tokenizer;
    return tokenizer;
  }

  /** @type {getCompletion} */
  async getCompletion(input, onProgress, onTokenProgress, abortController = null) {
    if (!abortController) {
      abortController = new AbortController();
    }

    let modelOptions = { ...this.modelOptions };
    if (typeof onProgress === 'function') {
      modelOptions.stream = true;
    }
    if (this.isChatGptModel) {
      modelOptions.messages = input;
    } else {
      modelOptions.prompt = input;
    }

    if (this.useOpenRouter && modelOptions.prompt) {
      delete modelOptions.stop;
    }

    const { debug } = this.options;
    let baseURL = this.completionsUrl;
    if (debug) {
      console.debug();
      console.debug(baseURL);
      console.debug(modelOptions);
      console.debug();
    }

    const opts = {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
    };

    if (this.isVisionModel) {
      modelOptions.max_tokens = 4000;
    }

    /** @type {TAzureConfig | undefined} */
    const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];

    const isAzure = this.azure || this.options.azure;
    if (
      (isAzure && this.isVisionModel && azureConfig) ||
      (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
    ) {
      const { modelGroupMap, groupMap } = azureConfig;
      const {
        azureOptions,
        baseURL,
        headers = {},
        serverless,
      } = mapModelToAzureConfig({
        modelName: modelOptions.model,
        modelGroupMap,
        groupMap,
      });
      opts.headers = resolveHeaders(headers);
      this.langchainProxy = extractBaseURL(baseURL);
      this.apiKey = azureOptions.azureOpenAIApiKey;

      const groupName = modelGroupMap[modelOptions.model].group;
      this.options.addParams = azureConfig.groupMap[groupName].addParams;
      this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
      // Note: `forcePrompt` not re-assigned as only chat models are vision models

      this.azure = !serverless && azureOptions;
      this.azureEndpoint =
        !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
      if (serverless === true) {
        this.options.defaultQuery = azureOptions.azureOpenAIApiVersion
          ? { 'api-version': azureOptions.azureOpenAIApiVersion }
          : undefined;
        this.options.headers['api-key'] = this.apiKey;
      }
    }

    if (this.options.defaultQuery) {
      opts.defaultQuery = this.options.defaultQuery;
    }

    if (this.options.headers) {
      opts.headers = { ...opts.headers, ...this.options.headers };
    }

    if (isAzure) {
      // Azure does not accept `model` in the body, so we need to remove it.
      delete modelOptions.model;

      baseURL = this.langchainProxy
        ? constructAzureURL({
            baseURL: this.langchainProxy,
            azureOptions: this.azure,
          })
        : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];

      if (this.options.forcePrompt) {
        baseURL += '/completions';
      } else {
        baseURL += '/chat/completions';
      }

      opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
      opts.headers = { ...opts.headers, 'api-key': this.apiKey };
    } else if (this.apiKey) {
      opts.headers.Authorization = `Bearer ${this.apiKey}`;
    }

    if (process.env.OPENAI_ORGANIZATION) {
      opts.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
    }

    if (this.useOpenRouter) {
      opts.headers['HTTP-Referer'] = 'https://librechat.ai';
      opts.headers['X-Title'] = 'LibreChat';
    }

    /* hacky fixes for Mistral AI API:
      - Re-orders system message to the top of the messages payload, as not allowed anywhere else
      - If there is only one message and it's a system message, change the role to user
    */
    if (baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
      const { messages } = modelOptions;

      const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');

      if (systemMessageIndex > 0) {
        const [systemMessage] = messages.splice(systemMessageIndex, 1);
        messages.unshift(systemMessage);
      }

      modelOptions.messages = messages;

      if (messages.length === 1 && messages[0].role === 'system') {
        modelOptions.messages[0].role = 'user';
      }
    }

    if (this.options.addParams && typeof this.options.addParams === 'object') {
      modelOptions = {
        ...modelOptions,
        ...this.options.addParams,
      };
      logger.debug('[ChatGPTClient] chatCompletion: added params', {
        addParams: this.options.addParams,
        modelOptions,
      });
    }

    if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
      this.options.dropParams.forEach((param) => {
        delete modelOptions[param];
      });
      logger.debug('[ChatGPTClient] chatCompletion: dropped params', {
        dropParams: this.options.dropParams,
        modelOptions,
      });
    }

    if (baseURL.startsWith(CohereConstants.API_URL)) {
      const payload = createCoherePayload({ modelOptions });
      return await this.cohereChatCompletion({ payload, onTokenProgress });
    }

    if (baseURL.includes('v1') && !baseURL.includes('/completions') && !this.isChatCompletion) {
      baseURL = baseURL.split('v1')[0] + 'v1/completions';
    } else if (
      baseURL.includes('v1') &&
      !baseURL.includes('/chat/completions') &&
      this.isChatCompletion
    ) {
      baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
    }

    const BASE_URL = new URL(baseURL);
    if (opts.defaultQuery) {
      Object.entries(opts.defaultQuery).forEach(([key, value]) => {
        BASE_URL.searchParams.append(key, value);
      });
      delete opts.defaultQuery;
    }

    const completionsURL = BASE_URL.toString();
    opts.body = JSON.stringify(modelOptions);

    if (modelOptions.stream) {
      return new Promise(async (resolve, reject) => {
        try {
          let done = false;
          await fetchEventSource(completionsURL, {
            ...opts,
            signal: abortController.signal,
            async onopen(response) {
              if (response.status === 200) {
                return;
              }
              if (debug) {
                console.debug(response);
              }
              let error;
              try {
                const body = await response.text();
                error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
                error.status = response.status;
                error.json = JSON.parse(body);
              } catch {
                error = error || new Error(`Failed to send message. HTTP ${response.status}`);
              }
              throw error;
            },
            onclose() {
              if (debug) {
                console.debug('Server closed the connection unexpectedly, returning...');
              }
              // workaround for private API not sending [DONE] event
              if (!done) {
                onProgress('[DONE]');
                resolve();
              }
            },
            onerror(err) {
              if (debug) {
                console.debug(err);
              }
              // rethrow to stop the operation
              throw err;
            },
            onmessage(message) {
              if (debug) {
                console.debug(message);
              }
              if (!message.data || message.event === 'ping') {
                return;
              }
              if (message.data === '[DONE]') {
                onProgress('[DONE]');
                resolve();
                done = true;
                return;
              }
              onProgress(JSON.parse(message.data));
            },
          });
        } catch (err) {
          reject(err);
        }
      });
    }
    const response = await fetch(completionsURL, {
      ...opts,
      signal: abortController.signal,
    });
    if (response.status !== 200) {
      const body = await response.text();
      const error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
      error.status = response.status;
      try {
        error.json = JSON.parse(body);
      } catch {
        error.body = body;
      }
      throw error;
    }
    return response.json();
  }

  /** @type {cohereChatCompletion} */
  async cohereChatCompletion({ payload, onTokenProgress }) {
    const cohere = new CohereClient({
      token: this.apiKey,
      environment: this.completionsUrl,
    });

    if (!payload.stream) {
      const chatResponse = await cohere.chat(payload);
      return chatResponse.text;
    }

    const chatStream = await cohere.chatStream(payload);
    let reply = '';
    for await (const message of chatStream) {
      if (!message) {
        continue;
      }

      if (message.eventType === 'text-generation' && message.text) {
        onTokenProgress(message.text);
        reply += message.text;
      }
      /*
      Cohere API Chinese Unicode character replacement hotfix.
      Should be un-commented when the following issue is resolved:
      https://github.com/cohere-ai/cohere-typescript/issues/151

      else if (message.eventType === 'stream-end' && message.response) {
        reply = message.response.text;
      }
      */
    }

    return reply;
  }

  async generateTitle(userMessage, botMessage) {
    const instructionsPayload = {
      role: 'system',
      content: `Write an extremely concise subtitle for this conversation with no more than a few words. All words should be capitalized. Exclude punctuation.

||>Message:
${userMessage.message}
||>Response:
${botMessage.message}

||>Title:`,
    };

    const titleGenClientOptions = JSON.parse(JSON.stringify(this.options));
    titleGenClientOptions.modelOptions = {
      model: 'gpt-3.5-turbo',
      temperature: 0,
      presence_penalty: 0,
      frequency_penalty: 0,
    };
    const titleGenClient = new ChatGPTClient(this.apiKey, titleGenClientOptions);
    const result = await titleGenClient.getCompletion([instructionsPayload], null);
    // remove any non-alphanumeric characters, replace multiple spaces with 1, and then trim
    return result.choices[0].message.content
      .replace(/[^a-zA-Z0-9' ]/g, '')
      .replace(/\s+/g, ' ')
      .trim();
  }

  async sendMessage(message, opts = {}) {
    if (opts.clientOptions && typeof opts.clientOptions === 'object') {
      this.setOptions(opts.clientOptions);
    }

    const conversationId = opts.conversationId || crypto.randomUUID();
    const parentMessageId = opts.parentMessageId || crypto.randomUUID();

    let conversation =
      typeof opts.conversation === 'object'
        ? opts.conversation
        : await this.conversationsCache.get(conversationId);

    let isNewConversation = false;
    if (!conversation) {
      conversation = {
        messages: [],
        createdAt: Date.now(),
      };
      isNewConversation = true;
    }

    const shouldGenerateTitle = opts.shouldGenerateTitle && isNewConversation;

    const userMessage = {
      id: crypto.randomUUID(),
      parentMessageId,
      role: 'User',
      message,
    };
    conversation.messages.push(userMessage);

    // Doing it this way instead of having each message be a separate element in the array seems to be more reliable,
    // especially when it comes to keeping the AI in character. It also seems to improve coherency and context retention.
    const { prompt: payload, context } = await this.buildPrompt(
      conversation.messages,
      userMessage.id,
      {
        isChatGptModel: this.isChatGptModel,
        promptPrefix: opts.promptPrefix,
      },
    );

    if (this.options.keepNecessaryMessagesOnly) {
      conversation.messages = context;
    }

    let reply = '';
    let result = null;
    if (typeof opts.onProgress === 'function') {
      await this.getCompletion(
        payload,
        (progressMessage) => {
          if (progressMessage === '[DONE]') {
            return;
          }
          const token = this.isChatGptModel
            ? progressMessage.choices[0].delta.content
            : progressMessage.choices[0].text;
          // first event's delta content is always undefined
          if (!token) {
            return;
          }
          if (this.options.debug) {
            console.debug(token);
          }
          if (token === this.endToken) {
            return;
          }
          opts.onProgress(token);
          reply += token;
        },
        opts.abortController || new AbortController(),
      );
    } else {
      result = await this.getCompletion(
        payload,
        null,
        opts.abortController || new AbortController(),
      );
      if (this.options.debug) {
        console.debug(JSON.stringify(result));
      }
      if (this.isChatGptModel) {
        reply = result.choices[0].message.content;
      } else {
        reply = result.choices[0].text.replace(this.endToken, '');
      }
    }

    // avoids some rendering issues when using the CLI app
    if (this.options.debug) {
      console.debug();
    }

    reply = reply.trim();

    const replyMessage = {
      id: crypto.randomUUID(),
      parentMessageId: userMessage.id,
      role: 'ChatGPT',
      message: reply,
    };
    conversation.messages.push(replyMessage);

    const returnData = {
      response: replyMessage.message,
      conversationId,
      parentMessageId: replyMessage.parentMessageId,
      messageId: replyMessage.id,
      details: result || {},
    };

    if (shouldGenerateTitle) {
      conversation.title = await this.generateTitle(userMessage, replyMessage);
      returnData.title = conversation.title;
    }

    await this.conversationsCache.set(conversationId, conversation);

    if (this.options.returnConversation) {
      returnData.conversation = conversation;
    }

    return returnData;
  }

  async buildPrompt(messages, { isChatGptModel = false, promptPrefix = null }) {
    promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();

    // Handle attachments and create augmentedPrompt
    if (this.options.attachments) {
      const attachments = await this.options.attachments;
      const lastMessage = messages[messages.length - 1];

      if (this.message_file_map) {
        this.message_file_map[lastMessage.messageId] = attachments;
      } else {
        this.message_file_map = {
          [lastMessage.messageId]: attachments,
        };
      }

      const files = await this.addImageURLs(lastMessage, attachments);
      this.options.attachments = files;

      this.contextHandlers = createContextHandlers(this.options.req, lastMessage.text);
    }

    if (this.message_file_map) {
      this.contextHandlers = createContextHandlers(
        this.options.req,
        messages[messages.length - 1].text,
      );
    }

    // Calculate image token cost and process embedded files
    messages.forEach((message, i) => {
      if (this.message_file_map && this.message_file_map[message.messageId]) {
        const attachments = this.message_file_map[message.messageId];
        for (const file of attachments) {
          if (file.embedded) {
            this.contextHandlers?.processFile(file);
            continue;
          }

          messages[i].tokenCount =
            (messages[i].tokenCount || 0) +
            this.calculateImageTokenCost({
              width: file.width,
              height: file.height,
              detail: this.options.imageDetail ?? ImageDetail.auto,
            });
        }
      }
    });

    if (this.contextHandlers) {
      this.augmentedPrompt = await this.contextHandlers.createContext();
      promptPrefix = this.augmentedPrompt + promptPrefix;
    }

    if (promptPrefix) {
      // If the prompt prefix doesn't end with the end token, add it.
      if (!promptPrefix.endsWith(`${this.endToken}`)) {
        promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
      }
      promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
    }
    const promptSuffix = `${this.startToken}${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond.

    const instructionsPayload = {
      role: 'system',
      content: promptPrefix,
    };

    const messagePayload = {
      role: 'system',
      content: promptSuffix,
    };

    let currentTokenCount;
    if (isChatGptModel) {
      currentTokenCount =
        this.getTokenCountForMessage(instructionsPayload) +
        this.getTokenCountForMessage(messagePayload);
    } else {
      currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
    }
    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;

    const context = [];

    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && messages.length > 0) {
        const message = messages.pop();
        const roleLabel =
          message?.isCreatedByUser || message?.role?.toLowerCase() === 'user'
            ? this.userLabel
            : this.chatGptLabel;
        const messageString = `${this.startToken}${roleLabel}:\n${
          message?.text ?? message?.message
        }${this.endToken}\n`;
        let newPromptBody;
        if (promptBody || isChatGptModel) {
          newPromptBody = `${messageString}${promptBody}`;
        } else {
          // Always insert prompt prefix before the last user message, if not gpt-3.5-turbo.
          // This makes the AI obey the prompt instructions better, which is important for custom instructions.
          // After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
          // like "what's the last thing I wrote?".
          newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
        }

        context.unshift(message);

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;
        if (newTokenCount > maxTokenCount) {
          if (promptBody) {
            // This message would put us over the token limit, so don't add it.
            return false;
          }
          // This is the first message, so we can't add it. Just throw an error.
          throw new Error(
            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
          );
        }
        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;
        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setImmediate(resolve));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();

    const prompt = `${promptBody}${promptSuffix}`;
    if (isChatGptModel) {
      messagePayload.content = prompt;
      // Add 3 tokens for Assistant Label priming after all messages have been counted.
      currentTokenCount += 3;
    }

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
    this.modelOptions.max_tokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens,
    );

    if (isChatGptModel) {
      return { prompt: [instructionsPayload, messagePayload], context };
    }
    return { prompt, context, promptTokens: currentTokenCount };
  }

  getTokenCount(text) {
    return this.gptEncoder.encode(text, 'all').length;
  }

  /**
   * Algorithm adapted from "6. Counting tokens for chat API calls" of
   * https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
   *
   * An additional 3 tokens need to be added for assistant label priming after all messages have been counted.
   *
   * @param {Object} message
   */
  getTokenCountForMessage(message) {
    // Note: gpt-3.5-turbo and gpt-4 may update over time. Use default for these as well as for unknown models
    let tokensPerMessage = 3;
    let tokensPerName = 1;

    if (this.modelOptions.model === 'gpt-3.5-turbo-0301') {
      tokensPerMessage = 4;
      tokensPerName = -1;
    }

    let numTokens = tokensPerMessage;
    for (let [key, value] of Object.entries(message)) {
      numTokens += this.getTokenCount(value);
      if (key === 'name') {
        numTokens += tokensPerName;
      }
    }

    return numTokens;
  }
}

module.exports = ChatGPTClient;
```
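The core of the deleted file's `buildPromptBody` is a backwards token-budget fill that yields to the event loop between messages. Here is a standalone sketch of that pattern; the names `fitMessagesToBudget` and `countTokens` are illustrative, not LibreChat APIs:

```js
// Sketch: walk messages newest-to-oldest, keep them while they fit the
// token budget, and yield between iterations so the event loop isn't blocked.
async function fitMessagesToBudget(messages, maxTokens, countTokens) {
  const kept = [];
  let total = 0;
  while (messages.length > 0) {
    const message = messages.pop(); // newest remaining message
    const cost = countTokens(message);
    if (total + cost > maxTokens) {
      break; // this message would overflow the budget, stop here
    }
    kept.unshift(message); // restore chronological order
    total += cost;
    // wait for the next tick, mirroring the setImmediate() in the original
    await new Promise((resolve) => setImmediate(resolve));
  }
  return { kept, total };
}
```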
**GoogleClient.js** — `getSafetySettings` now comes from `@librechat/api` instead of a server-side module, `googleSettings` is imported, and a thinking configuration is added:

```diff
@@ -1,7 +1,7 @@
 const { google } = require('googleapis');
-const { Tokenizer } = require('@librechat/api');
 const { concat } = require('@langchain/core/utils/stream');
 const { ChatVertexAI } = require('@langchain/google-vertexai');
+const { Tokenizer, getSafetySettings } = require('@librechat/api');
 const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
 const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
 const { HumanMessage, SystemMessage } = require('@langchain/core/messages');
@@ -12,13 +12,13 @@ const {
   endpointSettings,
   parseTextParts,
   EModelEndpoint,
+  googleSettings,
   ContentTypes,
   VisionModes,
   ErrorTypes,
   Constants,
   AuthKeys,
 } = require('librechat-data-provider');
-const { getSafetySettings } = require('~/server/services/Endpoints/google/llm');
 const { encodeAndFormat } = require('~/server/services/Files/images');
 const { spendTokens } = require('~/models/spendTokens');
 const { getModelMaxTokens } = require('~/utils');
@@ -166,6 +166,16 @@ class GoogleClient extends BaseClient {
       );
     }

+    // Add thinking configuration
+    this.modelOptions.thinkingConfig = {
+      thinkingBudget:
+        (this.modelOptions.thinking ?? googleSettings.thinking.default)
+          ? this.modelOptions.thinkingBudget
+          : 0,
+    };
+    delete this.modelOptions.thinking;
+    delete this.modelOptions.thinkingBudget;
+
     this.sender =
       this.options.sender ??
       getResponseSender({
```
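The parenthesized nullish coalescing in the new `thinkingConfig` matters: an explicit `thinking: false` zeroes the budget even when a budget value is present, while an unset `thinking` defers to the configured default. A small sketch of that resolution logic (the function name is illustrative):

```js
// (thinking ?? defaultThinking) decides whether the budget applies at all.
function resolveThinkingBudget(thinking, thinkingBudget, defaultThinking) {
  return (thinking ?? defaultThinking) ? thinkingBudget : 0;
}

console.log(resolveThinkingBudget(undefined, 1024, true)); // 1024 (default enables it)
console.log(resolveThinkingBudget(false, 1024, true)); // 0 (explicitly disabled)
console.log(resolveThinkingBudget(true, 2048, false)); // 2048 (explicitly enabled)
```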
**OpenAIClient.js** — `resolveHeaders` moves to the `@librechat/api` import, the `ChatGPTClient` delegation is removed from the constructor, and `buildMessages` drops its legacy text-completion path:

```diff
@@ -5,6 +5,7 @@ const {
   isEnabled,
   Tokenizer,
   createFetch,
+  resolveHeaders,
   constructAzureURL,
   genAzureChatCompletion,
   createStreamEventHandlers,
@@ -15,7 +16,6 @@ const {
   ContentTypes,
   parseTextParts,
   EModelEndpoint,
-  resolveHeaders,
   KnownEndpoints,
   openAISettings,
   ImageDetailCost,
@@ -37,7 +37,6 @@ const { addSpaceIfNeeded, sleep } = require('~/server/utils');
 const { spendTokens } = require('~/models/spendTokens');
 const { handleOpenAIErrors } = require('./tools/util');
 const { createLLM, RunManager } = require('./llm');
-const ChatGPTClient = require('./ChatGPTClient');
 const { summaryBuffer } = require('./memory');
 const { runTitleChain } = require('./chains');
 const { tokenSplit } = require('./document');
@@ -47,12 +46,6 @@ const { logger } = require('~/config');
 class OpenAIClient extends BaseClient {
   constructor(apiKey, options = {}) {
     super(apiKey, options);
-    this.ChatGPTClient = new ChatGPTClient();
-    this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
-    /** @type {getCompletion} */
-    this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
-    /** @type {cohereChatCompletion} */
-    this.cohereChatCompletion = this.ChatGPTClient.cohereChatCompletion.bind(this);
     this.contextStrategy = options.contextStrategy
       ? options.contextStrategy.toLowerCase()
       : 'discard';
@@ -379,23 +372,12 @@
     return files;
   }

-  async buildMessages(
-    messages,
-    parentMessageId,
-    { isChatCompletion = false, promptPrefix = null },
-    opts,
-  ) {
+  async buildMessages(messages, parentMessageId, { promptPrefix = null }, opts) {
     let orderedMessages = this.constructor.getMessagesForConversation({
       messages,
       parentMessageId,
       summary: this.shouldSummarize,
     });
-    if (!isChatCompletion) {
-      return await this.buildPrompt(orderedMessages, {
-        isChatGptModel: isChatCompletion,
-        promptPrefix,
-      });
-    }

     let payload;
     let instructions;
```
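The removed constructor lines used `Function.prototype.bind` to graft `ChatGPTClient` methods onto each `OpenAIClient` instance, which is why deleting that file also means deleting these bindings. A minimal illustration of the delegation pattern (the classes here are hypothetical, not LibreChat code):

```js
class Helper {
  greet() {
    // `this` is whatever the method was bound to, not the Helper instance
    return `hello from ${this.name}`;
  }
}

class Host {
  constructor() {
    this.name = 'host';
    // graft Helper's method onto this instance, bound to the Host
    this.greet = new Helper().greet.bind(this);
  }
}

console.log(new Host().greet()); // "hello from host"
```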
**PluginsClient.js** — removed entirely (`@@ -1,542 +0,0 @@`). The deleted plugins client:

```js
const OpenAIClient = require('./OpenAIClient');
const { CallbackManager } = require('@langchain/core/callbacks/manager');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
const { checkBalance } = require('~/models/balanceMethods');
const { formatLangChainMessages } = require('./prompts');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { logger } = require('~/config');

class PluginsClient extends OpenAIClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.sender = options.sender ?? 'Assistant';
    this.tools = [];
    this.actions = [];
    this.setOptions(options);
    this.openAIApiKey = this.apiKey;
    this.executor = null;
  }

  setOptions(options) {
    this.agentOptions = { ...options.agentOptions };
    this.functionsAgent = this.agentOptions?.agent === 'functions';
    this.agentIsGpt3 = this.agentOptions?.model?.includes('gpt-3');

    super.setOptions(options);

    this.isGpt3 = this.modelOptions?.model?.includes('gpt-3');

    if (this.options.reverseProxyUrl) {
      this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
    }
  }

  getSaveOptions() {
    return {
      artifacts: this.options.artifacts,
      chatGptLabel: this.options.chatGptLabel,
      modelLabel: this.options.modelLabel,
      promptPrefix: this.options.promptPrefix,
      tools: this.options.tools,
      ...this.modelOptions,
      agentOptions: this.agentOptions,
      iconURL: this.options.iconURL,
      greeting: this.options.greeting,
      spec: this.options.spec,
    };
  }

  saveLatestAction(action) {
    this.actions.push(action);
  }

  getFunctionModelName(input) {
    if (/-(?!0314)\d{4}/.test(input)) {
      return input;
    } else if (input.includes('gpt-3.5-turbo')) {
      return 'gpt-3.5-turbo';
    } else if (input.includes('gpt-4')) {
      return 'gpt-4';
    } else {
      return 'gpt-3.5-turbo';
    }
  }

  getBuildMessagesOptions(opts) {
    return {
      isChatCompletion: true,
      promptPrefix: opts.promptPrefix,
      abortController: opts.abortController,
    };
  }

  async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
    const modelOptions = {
      modelName: this.agentOptions.model,
      temperature: this.agentOptions.temperature,
    };

    const model = this.initializeLLM({
      ...modelOptions,
      context: 'plugins',
      initialMessageCount: this.currentMessages.length + 1,
    });

    logger.debug(
      `[PluginsClient] Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}`,
    );

    // Map Messages to Langchain format
    const pastMessages = formatLangChainMessages(this.currentMessages.slice(0, -1), {
      userName: this.options?.name,
    });
    logger.debug('[PluginsClient] pastMessages: ' + pastMessages.length);

    // TODO: use readOnly memory, TokenBufferMemory? (both unavailable in LangChainJS)
    const memory = new BufferMemory({
      llm: model,
      chatHistory: new ChatMessageHistory(pastMessages),
    });

    const { loadedTools } = await loadTools({
      user,
      model,
      tools: this.options.tools,
      functions: this.functionsAgent,
      options: {
        memory,
        signal: this.abortController.signal,
        openAIApiKey: this.openAIApiKey,
        conversationId: this.conversationId,
        fileStrategy: this.options.req.app.locals.fileStrategy,
        processFileURL,
        message,
      },
      useSpecs: true,
    });

    if (loadedTools.length === 0) {
      return;
    }

    this.tools = loadedTools;

    logger.debug('[PluginsClient] Requested Tools', this.options.tools);
    logger.debug(
      '[PluginsClient] Loaded Tools',
      this.tools.map((tool) => tool.name),
    );

    const handleAction = (action, runId, callback = null) => {
      this.saveLatestAction(action);

      logger.debug('[PluginsClient] Latest Agent Action ', this.actions[this.actions.length - 1]);

      if (typeof callback === 'function') {
        callback(action, runId);
      }
    };

    // initialize agent
    const initializer = this.functionsAgent ? initializeFunctionsAgent : initializeCustomAgent;

    let customInstructions = (this.options.promptPrefix ?? '').trim();
    if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
      customInstructions = `${customInstructions ?? ''}\n${this.options.artifactsPrompt}`.trim();
    }

    this.executor = await initializer({
      model,
      signal,
      pastMessages,
      tools: this.tools,
      customInstructions,
      verbose: this.options.debug,
      returnIntermediateSteps: true,
      customName: this.options.chatGptLabel,
      currentDateString: this.currentDateString,
      callbackManager: CallbackManager.fromHandlers({
        async handleAgentAction(action, runId) {
          handleAction(action, runId, onAgentAction);
        },
        async handleChainEnd(action) {
          if (typeof onChainEnd === 'function') {
            onChainEnd(action);
          }
        },
      }),
    });

    logger.debug('[PluginsClient] Loaded agent.');
  }

  async executorCall(message, { signal, stream, onToolStart, onToolEnd }) {
    let errorMessage = '';
    const maxAttempts = 1;

    for (let attempts = 1; attempts <= maxAttempts; attempts++) {
      const errorInput = buildErrorInput({
        message,
        errorMessage,
        actions: this.actions,
        functionsAgent: this.functionsAgent,
      });
      const input = attempts > 1 ? errorInput : message;

      logger.debug(`[PluginsClient] Attempt ${attempts} of ${maxAttempts}`);

      if (errorMessage.length > 0) {
        logger.debug('[PluginsClient] Caught error, input: ' + JSON.stringify(input));
      }

      try {
        this.result = await this.executor.call({ input, signal }, [
          {
            async handleToolStart(...args) {
              await onToolStart(...args);
            },
            async handleToolEnd(...args) {
              await onToolEnd(...args);
            },
            async handleLLMEnd(output) {
              const { generations } = output;
              const { text } = generations[0][0];
              if (text && typeof stream === 'function') {
                await stream(text);
              }
            },
          },
        ]);
        break; // Exit the loop if the function call is successful
      } catch (err) {
        logger.error('[PluginsClient] executorCall error:', err);
        if (attempts === maxAttempts) {
          const { run } = this.runManager.getRunByConversationId(this.conversationId);
          const defaultOutput = `Encountered an error while attempting to respond: ${err.message}`;
          this.result.output = run && run.error ? run.error : defaultOutput;
          this.result.errorMessage = run && run.error ? run.error : err.message;
          this.result.intermediateSteps = this.actions;
          break;
        }
      }
    }
  }

  /**
   *
   * @param {TMessage} responseMessage
   * @param {Partial<TMessage>} saveOptions
   * @param {string} user
   * @returns
   */
  async handleResponseMessage(responseMessage, saveOptions, user) {
    const { output, errorMessage, ...result } = this.result;
    logger.debug('[PluginsClient][handleResponseMessage] Output:', {
      output,
      errorMessage,
      ...result,
    });
    const { error } = responseMessage;
    if (!error) {
      responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
      responseMessage.completionTokens = this.getTokenCount(responseMessage.text);
    }

    // Record usage only when completion is skipped as it is already recorded in the agent phase.
    if (!this.agentOptions.skipCompletion && !error) {
      await this.recordTokenUsage(responseMessage);
    }

    const databasePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
    delete responseMessage.tokenCount;
    return { ...responseMessage, ...result, databasePromise };
  }

  async sendMessage(message, opts = {}) {
    /** @type {Promise<TMessage>} */
    let userMessagePromise;
    /** @type {{ filteredTools: string[], includedTools: string[] }} */
    const { filteredTools = [], includedTools = [] } = this.options.req.app.locals;

    if (includedTools.length > 0) {
      const tools = this.options.tools.filter((plugin) => includedTools.includes(plugin));
      this.options.tools = tools;
    } else {
      const tools = this.options.tools.filter((plugin) => !filteredTools.includes(plugin));
      this.options.tools = tools;
    }

    // If a message is edited, no tools can be used.
    const completionMode = this.options.tools.length === 0 || opts.isEdited;
    if (completionMode) {
      this.setOptions(opts);
      return super.sendMessage(message, opts);
    }

    logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts });
    const {
      user,
      conversationId,
      responseMessageId,
      saveOptions,
      userMessage,
      onAgentAction,
      onChainEnd,
      onToolStart,
      onToolEnd,
    } = await this.handleStartMethods(message, opts);

    if (opts.progressCallback) {
      opts.onProgress = opts.progressCallback.call(null, {
        ...(opts.progressOptions ?? {}),
        parentMessageId: userMessage.messageId,
        messageId: responseMessageId,
      });
    }

    this.currentMessages.push(userMessage);

    let {
      prompt: payload,
      tokenCountMap,
      promptTokens,
    } = await this.buildMessages(
      this.currentMessages,
      userMessage.messageId,
      this.getBuildMessagesOptions({
        promptPrefix: null,
        abortController: this.abortController,
      }),
    );

    if (tokenCountMap) {
      logger.debug('[PluginsClient] tokenCountMap', { tokenCountMap });
      if (tokenCountMap[userMessage.messageId]) {
        userMessage.tokenCount = tokenCountMap[userMessage.messageId];
        logger.debug('[PluginsClient] userMessage.tokenCount', userMessage.tokenCount);
      }
      this.handleTokenCountMap(tokenCountMap);
    }

    this.result = {};
    if (payload) {
      this.currentMessages = payload;
    }

    if (!this.skipSaveUserMessage) {
      userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
      if (typeof opts?.getReqData === 'function') {
        opts.getReqData({
          userMessagePromise,
        });
      }
    }

    const balance = this.options.req?.app?.locals?.balance;
    if (balance?.enabled) {
      await checkBalance({
        req: this.options.req,
        res: this.options.res,
        txData: {
          user: this.user,
          tokenType: 'prompt',
          amount: promptTokens,
          debug: this.options.debug,
          model: this.modelOptions.model,
          endpoint: EModelEndpoint.openAI,
        },
      });
    }

    const responseMessage = {
      endpoint: EModelEndpoint.gptPlugins,
      iconURL: this.options.iconURL,
      messageId: responseMessageId,
      conversationId,
      parentMessageId: userMessage.messageId,
      isCreatedByUser: false,
      model: this.modelOptions.model,
      sender: this.sender,
      promptTokens,
    };

    await this.initialize({
      user,
      message,
      onAgentAction,
      onChainEnd,
      signal: this.abortController.signal,
      onProgress: opts.onProgress,
    });

    // const stream = async (text) => {
    //   await this.generateTextStream.call(this, text, opts.onProgress, { delay: 1 });
    // };
    await this.executorCall(message, {
      signal: this.abortController.signal,
      // stream,
      onToolStart,
      onToolEnd,
    });

    // If message was aborted mid-generation
    if (this.result?.errorMessage?.length > 0 && this.result?.errorMessage?.includes('cancel')) {
      responseMessage.text = 'Cancelled.';
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    // If error occurred during generation (likely token_balance)
    if (this.result?.errorMessage?.length > 0) {
      responseMessage.error = true;
      responseMessage.text = this.result.output;
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    if (this.agentOptions.skipCompletion && this.result.output && this.functionsAgent) {
      const partialText = opts.getPartialText();
      const trimmedPartial = opts.getPartialText().replaceAll(':::plugin:::\n', '');
      responseMessage.text =
        trimmedPartial.length === 0 ? `${partialText}${this.result.output}` : partialText;
      addImages(this.result.intermediateSteps, responseMessage);
      await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    if (this.agentOptions.skipCompletion && this.result.output) {
      responseMessage.text = this.result.output;
      addImages(this.result.intermediateSteps, responseMessage);
      await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    logger.debug('[PluginsClient] Completion phase: this.result', this.result);

    const promptPrefix = buildPromptPrefix({
      result: this.result,
      message,
      functionsAgent: this.functionsAgent,
    });

    logger.debug('[PluginsClient]', { promptPrefix });

    payload = await this.buildCompletionPrompt({
      messages: this.currentMessages,
      promptPrefix,
    });

    logger.debug('[PluginsClient] buildCompletionPrompt Payload', payload);
    responseMessage.text = await this.sendCompletion(payload, opts);
    return await this.handleResponseMessage(responseMessage, saveOptions, user);
  }

  async buildCompletionPrompt({ messages, promptPrefix: _promptPrefix }) {
    logger.debug('[PluginsClient] buildCompletionPrompt messages', messages);

    const orderedMessages = messages;
    let promptPrefix = _promptPrefix.trim();
    // If the prompt prefix doesn't end with the end token, add it.
    if (!promptPrefix.endsWith(`${this.endToken}`)) {
      promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
    }
    promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
    const promptSuffix = `${this.startToken}${this.chatGptLabel ?? 'Assistant'}:\n`;

    const instructionsPayload = {
      role: 'system',
      content: promptPrefix,
    };

    const messagePayload = {
      role: 'system',
      content: promptSuffix,
    };

    if (this.isGpt3) {
      instructionsPayload.role = 'user';
      messagePayload.role = 'user';
      instructionsPayload.content += `\n${promptSuffix}`;
    }

    // testing if this works with browser endpoint
    if (!this.isGpt3 && this.options.reverseProxyUrl) {
      instructionsPayload.role = 'user';
    }

    let currentTokenCount =
      this.getTokenCountForMessage(instructionsPayload) +
      this.getTokenCountForMessage(messagePayload);

    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;
    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
        const message = orderedMessages.pop();
        const isCreatedByUser = message.isCreatedByUser || message.role?.toLowerCase() === 'user';
        const roleLabel = isCreatedByUser ? this.userLabel : this.chatGptLabel;
        let messageString = `${this.startToken}${roleLabel}:\n${
          message.text ?? message.content ?? ''
        }${this.endToken}\n`;
        let newPromptBody = `${messageString}${promptBody}`;

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;
        if (newTokenCount > maxTokenCount) {
          if (promptBody) {
            // This message would put us over the token limit, so don't add it.
            return false;
          }
          // This is the first message, so we can't add it. Just throw an error.
          throw new Error(
            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
          );
        }
        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;
        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setTimeout(resolve, 0));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();
    const prompt = promptBody;
    messagePayload.content = prompt;
    // Add 2 tokens for metadata after all messages have been counted.
    currentTokenCount += 2;

    if (this.isGpt3 && messagePayload.content.length > 0) {
      const context = 'Chat History:\n';
      messagePayload.content = `${context}${prompt}`;
      currentTokenCount += this.getTokenCount(context);
    }

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
    this.modelOptions.max_tokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens,
    );

    if (this.isGpt3) {
      messagePayload.content += promptSuffix;
      return [instructionsPayload, messagePayload];
    }

    const result = [messagePayload, instructionsPayload];

    if (this.functionsAgent && !this.isGpt3) {
      result[1].content = `${result[1].content}\n${this.startToken}${this.chatGptLabel}:\nSure thing! Here is the output you requested:\n`;
    }

    return result.filter((message) => message.content.length > 0);
  }
}

module.exports = PluginsClient;
```
**index.js** — the clients barrel file drops the deleted `ChatGPTClient` and `PluginsClient` from its requires and exports:

```diff
@@ -1,15 +1,11 @@
-const ChatGPTClient = require('./ChatGPTClient');
 const OpenAIClient = require('./OpenAIClient');
-const PluginsClient = require('./PluginsClient');
 const GoogleClient = require('./GoogleClient');
 const TextStream = require('./TextStream');
 const AnthropicClient = require('./AnthropicClient');
 const toolUtils = require('./tools/util');

 module.exports = {
-  ChatGPTClient,
   OpenAIClient,
-  PluginsClient,
   GoogleClient,
   TextStream,
   AnthropicClient,
```
**OpenAIClient tests** — the legacy `getCompletion`/`fetchEventSource` text-completion test block is removed:

```diff
@@ -531,44 +531,6 @@ describe('OpenAIClient', () => {
     });
   });

-  describe('sendMessage/getCompletion/chatCompletion', () => {
-    afterEach(() => {
-      delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
-      delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
-    });
-
-    it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
-      const model = 'text-davinci-003';
-      const onProgress = jest.fn().mockImplementation(() => ({}));
-
-      const testClient = new OpenAIClient('test-api-key', {
-        ...defaultOptions,
-        modelOptions: { model },
-      });
-
-      const getCompletion = jest.spyOn(testClient, 'getCompletion');
-      await testClient.sendMessage('Hi mom!', { onProgress });
-
-      expect(getCompletion).toHaveBeenCalled();
-      expect(getCompletion.mock.calls.length).toBe(1);
-
-      expect(getCompletion.mock.calls[0][0]).toBe('||>User:\nHi mom!\n||>Assistant:\n');
-
-      expect(fetchEventSource).toHaveBeenCalled();
-      expect(fetchEventSource.mock.calls.length).toBe(1);
-
-      // Check if the first argument (url) is correct
-      const firstCallArgs = fetchEventSource.mock.calls[0];
-
-      const expectedURL = 'https://api.openai.com/v1/completions';
-      expect(firstCallArgs[0]).toBe(expectedURL);
-
-      const requestBody = JSON.parse(firstCallArgs[1].body);
-      expect(requestBody).toHaveProperty('model');
-      expect(requestBody.model).toBe(model);
-    });
-  });
-
   describe('checkVisionRequest functionality', () => {
     let client;
     const attachments = [{ type: 'image/png' }];
```
@@ -1,314 +0,0 @@
const crypto = require('crypto');
const { Constants } = require('librechat-data-provider');
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const PluginsClient = require('../PluginsClient');

jest.mock('~/db/connect');
jest.mock('~/models/Conversation', () => {
  return function () {
    return {
      save: jest.fn(),
      deleteConvos: jest.fn(),
    };
  };
});

const defaultAzureOptions = {
  azureOpenAIApiInstanceName: 'your-instance-name',
  azureOpenAIApiDeploymentName: 'your-deployment-name',
  azureOpenAIApiVersion: '2020-07-01-preview',
};

describe('PluginsClient', () => {
  let TestAgent;
  let options = {
    tools: [],
    modelOptions: {
      model: 'gpt-3.5-turbo',
      temperature: 0,
      max_tokens: 2,
    },
    agentOptions: {
      model: 'gpt-3.5-turbo',
    },
  };
  let parentMessageId;
  let conversationId;
  const fakeMessages = [];
  const userMessage = 'Hello, ChatGPT!';
  const apiKey = 'fake-api-key';

  beforeEach(() => {
    TestAgent = new PluginsClient(apiKey, options);
    TestAgent.loadHistory = jest
      .fn()
      .mockImplementation((conversationId, parentMessageId = null) => {
        if (!conversationId) {
          TestAgent.currentMessages = [];
          return Promise.resolve([]);
        }

        const orderedMessages = TestAgent.constructor.getMessagesForConversation({
          messages: fakeMessages,
          parentMessageId,
        });

        const chatMessages = orderedMessages.map((msg) =>
          msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
            ? new HumanMessage(msg.text)
            : new AIMessage(msg.text),
        );

        TestAgent.currentMessages = orderedMessages;
        return Promise.resolve(chatMessages);
      });
    TestAgent.sendMessage = jest.fn().mockImplementation(async (message, opts = {}) => {
      if (opts && typeof opts === 'object') {
        TestAgent.setOptions(opts);
      }
      const conversationId = opts.conversationId || crypto.randomUUID();
      const parentMessageId = opts.parentMessageId || Constants.NO_PARENT;
      const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
      this.pastMessages = await TestAgent.loadHistory(
        conversationId,
        TestAgent.options?.parentMessageId,
      );

      const userMessage = {
        text: message,
        sender: 'ChatGPT',
        isCreatedByUser: true,
        messageId: userMessageId,
        parentMessageId,
        conversationId,
      };

      const response = {
        sender: 'ChatGPT',
        text: 'Hello, User!',
        isCreatedByUser: false,
        messageId: crypto.randomUUID(),
        parentMessageId: userMessage.messageId,
        conversationId,
      };

      fakeMessages.push(userMessage);
      fakeMessages.push(response);
      return response;
    });
  });

  test('initializes PluginsClient without crashing', () => {
    expect(TestAgent).toBeInstanceOf(PluginsClient);
  });

  test('check setOptions function', () => {
    expect(TestAgent.agentIsGpt3).toBe(true);
  });

  describe('sendMessage', () => {
    test('sendMessage should return a response message', async () => {
      const expectedResult = expect.objectContaining({
        sender: 'ChatGPT',
        text: expect.any(String),
        isCreatedByUser: false,
        messageId: expect.any(String),
        parentMessageId: expect.any(String),
        conversationId: expect.any(String),
      });

      const response = await TestAgent.sendMessage(userMessage);
      parentMessageId = response.messageId;
      conversationId = response.conversationId;
      expect(response).toEqual(expectedResult);
    });

    test('sendMessage should work with provided conversationId and parentMessageId', async () => {
      const userMessage = 'Second message in the conversation';
      const opts = {
        conversationId,
        parentMessageId,
      };

      const expectedResult = expect.objectContaining({
        sender: 'ChatGPT',
        text: expect.any(String),
        isCreatedByUser: false,
        messageId: expect.any(String),
        parentMessageId: expect.any(String),
        conversationId: opts.conversationId,
      });

      const response = await TestAgent.sendMessage(userMessage, opts);
      parentMessageId = response.messageId;
      expect(response.conversationId).toEqual(conversationId);
      expect(response).toEqual(expectedResult);
    });

    test('should return chat history', async () => {
      const chatMessages = await TestAgent.loadHistory(conversationId, parentMessageId);
      expect(TestAgent.currentMessages).toHaveLength(4);
      expect(chatMessages[0].text).toEqual(userMessage);
    });
  });

  describe('getFunctionModelName', () => {
    let client;

    beforeEach(() => {
      client = new PluginsClient('dummy_api_key');
    });

    test('should return the input when it includes a dash followed by four digits', () => {
      expect(client.getFunctionModelName('-1234')).toBe('-1234');
      expect(client.getFunctionModelName('gpt-4-5678-preview')).toBe('gpt-4-5678-preview');
    });

    test('should return the input for all function-capable models (`0613` models and above)', () => {
      expect(client.getFunctionModelName('gpt-4-0613')).toBe('gpt-4-0613');
      expect(client.getFunctionModelName('gpt-4-32k-0613')).toBe('gpt-4-32k-0613');
      expect(client.getFunctionModelName('gpt-3.5-turbo-0613')).toBe('gpt-3.5-turbo-0613');
      expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0613')).toBe('gpt-3.5-turbo-16k-0613');
      expect(client.getFunctionModelName('gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
      expect(client.getFunctionModelName('gpt-4-1106-preview')).toBe('gpt-4-1106-preview');
      expect(client.getFunctionModelName('gpt-4-1106')).toBe('gpt-4-1106');
    });

    test('should return the corresponding model if input is non-function capable (`0314` models)', () => {
      expect(client.getFunctionModelName('gpt-4-0314')).toBe('gpt-4');
      expect(client.getFunctionModelName('gpt-4-32k-0314')).toBe('gpt-4');
      expect(client.getFunctionModelName('gpt-3.5-turbo-0314')).toBe('gpt-3.5-turbo');
      expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0314')).toBe('gpt-3.5-turbo');
    });

    test('should return "gpt-3.5-turbo" when the input includes "gpt-3.5-turbo"', () => {
      expect(client.getFunctionModelName('test gpt-3.5-turbo model')).toBe('gpt-3.5-turbo');
    });

    test('should return "gpt-4" when the input includes "gpt-4"', () => {
      expect(client.getFunctionModelName('testing gpt-4')).toBe('gpt-4');
    });

    test('should return "gpt-3.5-turbo" for input that does not meet any specific condition', () => {
      expect(client.getFunctionModelName('random string')).toBe('gpt-3.5-turbo');
      expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
    });
  });
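Taken together, these cases pin the mapping down almost completely. A minimal sketch of logic consistent with the tests (hypothetical; not the actual PluginsClient source, which this commit deletes):

// Hypothetical reconstruction from the tests above: 0314-era models map back
// to their base model, any other '-NNNN' suffix passes through unchanged, and
// everything else falls back by substring match, defaulting to gpt-3.5-turbo.
function getFunctionModelName(input) {
  if (input.includes('-0314')) {
    return input.includes('gpt-4') ? 'gpt-4' : 'gpt-3.5-turbo';
  }
  if (/-\d{4}/.test(input)) {
    return input;
  }
  if (input.includes('gpt-4')) {
    return 'gpt-4';
  }
  return 'gpt-3.5-turbo';
}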
  describe('Azure OpenAI tests specific to Plugins', () => {
    // TODO: add more tests for Azure OpenAI integration with Plugins
    // let client;
    // beforeEach(() => {
    //   client = new PluginsClient('dummy_api_key');
    // });

    test('should not call getFunctionModelName when azure options are set', () => {
      const spy = jest.spyOn(PluginsClient.prototype, 'getFunctionModelName');
      const model = 'gpt-4-turbo';

      // note, without the azure change in PR #1766, `getFunctionModelName` is called twice
      const testClient = new PluginsClient('dummy_api_key', {
        agentOptions: {
          model,
          agent: 'functions',
        },
        azure: defaultAzureOptions,
      });

      expect(spy).not.toHaveBeenCalled();
      expect(testClient.agentOptions.model).toBe(model);

      spy.mockRestore();
    });
  });

  describe('sendMessage with filtered tools', () => {
    let TestAgent;
    const apiKey = 'fake-api-key';
    const mockTools = [{ name: 'tool1' }, { name: 'tool2' }, { name: 'tool3' }, { name: 'tool4' }];

    beforeEach(() => {
      TestAgent = new PluginsClient(apiKey, {
        tools: mockTools,
        modelOptions: {
          model: 'gpt-3.5-turbo',
          temperature: 0,
          max_tokens: 2,
        },
        agentOptions: {
          model: 'gpt-3.5-turbo',
        },
      });

      TestAgent.options.req = {
        app: {
          locals: {},
        },
      };

      TestAgent.sendMessage = jest.fn().mockImplementation(async () => {
        const { filteredTools = [], includedTools = [] } = TestAgent.options.req.app.locals;

        if (includedTools.length > 0) {
          const tools = TestAgent.options.tools.filter((plugin) =>
            includedTools.includes(plugin.name),
          );
          TestAgent.options.tools = tools;
        } else {
          const tools = TestAgent.options.tools.filter(
            (plugin) => !filteredTools.includes(plugin.name),
          );
          TestAgent.options.tools = tools;
        }

        return {
          text: 'Mocked response',
          tools: TestAgent.options.tools,
        };
      });
    });

    test('should filter out tools when filteredTools is provided', async () => {
      TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(2);
      expect(response.tools).toEqual(
        expect.arrayContaining([
          expect.objectContaining({ name: 'tool2' }),
          expect.objectContaining({ name: 'tool4' }),
        ]),
      );
    });

    test('should only include specified tools when includedTools is provided', async () => {
      TestAgent.options.req.app.locals.includedTools = ['tool2', 'tool4'];
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(2);
      expect(response.tools).toEqual(
        expect.arrayContaining([
          expect.objectContaining({ name: 'tool2' }),
          expect.objectContaining({ name: 'tool4' }),
        ]),
      );
    });

    test('should prioritize includedTools over filteredTools', async () => {
      TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
      TestAgent.options.req.app.locals.includedTools = ['tool1', 'tool2'];
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(2);
      expect(response.tools).toEqual(
        expect.arrayContaining([
          expect.objectContaining({ name: 'tool1' }),
          expect.objectContaining({ name: 'tool2' }),
        ]),
      );
    });

    test('should not modify tools when no filters are provided', async () => {
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(4);
      expect(response.tools).toEqual(expect.arrayContaining(mockTools));
    });
  });
});
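The mock above encodes a precedence rule worth calling out: an allow-list (includedTools) short-circuits the deny-list (filteredTools). As a standalone helper, the same logic might read (a sketch; the names simply mirror the req.app.locals fields):

// Sketch of the filtering precedence the mock implements: includedTools wins
// outright; filteredTools is only consulted when no allow-list is set.
function applyToolFilters(tools, { includedTools = [], filteredTools = [] } = {}) {
  if (includedTools.length > 0) {
    return tools.filter((tool) => includedTools.includes(tool.name));
  }
  return tools.filter((tool) => !filteredTools.includes(tool.name));
}

// applyToolFilters(mockTools, { filteredTools: ['tool1', 'tool3'] }) → tool2, tool4
// applyToolFilters(mockTools, { includedTools: ['tool1'], filteredTools: ['tool1'] }) → tool1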
@@ -107,6 +107,12 @@ const getImageEditPromptDescription = () => {
  return process.env.IMAGE_EDIT_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION;
};

function createAbortHandler() {
  return function () {
    logger.debug('[ImageGenOAI] Image generation aborted');
  };
}

/**
 * Creates OpenAI Image tools (generation and editing)
 * @param {Object} fields - Configuration fields
@@ -201,10 +207,18 @@ function createOpenAIImageTools(fields = {}) {
      }

      let resp;
      /** @type {AbortSignal} */
      let derivedSignal = null;
      /** @type {() => void} */
      let abortHandler = null;

      try {
        const derivedSignal = runnableConfig?.signal
          ? AbortSignal.any([runnableConfig.signal])
          : undefined;
        if (runnableConfig?.signal) {
          derivedSignal = AbortSignal.any([runnableConfig.signal]);
          abortHandler = createAbortHandler();
          derivedSignal.addEventListener('abort', abortHandler, { once: true });
        }

        resp = await openai.images.generate(
          {
            model: 'gpt-image-1',
@@ -228,6 +242,10 @@ function createOpenAIImageTools(fields = {}) {
        logAxiosError({ error, message });
        return returnValue(`Something went wrong when trying to generate the image. The OpenAI API may be unavailable:
Error Message: ${error.message}`);
      } finally {
        if (abortHandler && derivedSignal) {
          derivedSignal.removeEventListener('abort', abortHandler);
        }
      }

      if (!resp) {
@@ -409,10 +427,17 @@ Error Message: ${error.message}`);
        headers['Authorization'] = `Bearer ${apiKey}`;
      }

      /** @type {AbortSignal} */
      let derivedSignal = null;
      /** @type {() => void} */
      let abortHandler = null;

      try {
        const derivedSignal = runnableConfig?.signal
          ? AbortSignal.any([runnableConfig.signal])
          : undefined;
        if (runnableConfig?.signal) {
          derivedSignal = AbortSignal.any([runnableConfig.signal]);
          abortHandler = createAbortHandler();
          derivedSignal.addEventListener('abort', abortHandler, { once: true });
        }

        /** @type {import('axios').AxiosRequestConfig} */
        const axiosConfig = {
@@ -467,6 +492,10 @@ Error Message: ${error.message}`);
        logAxiosError({ error, message });
        return returnValue(`Something went wrong when trying to edit the image. The OpenAI API may be unavailable:
Error Message: ${error.message || 'Unknown error'}`);
      } finally {
        if (abortHandler && derivedSignal) {
          derivedSignal.removeEventListener('abort', abortHandler);
        }
      }
    },
    {
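Both hunks apply the same pattern: derive a signal with AbortSignal.any, attach a once listener for logging, and always detach in finally so neither completion nor abort leaks a listener. Reduced to its skeleton (a sketch assuming Node 20+ for AbortSignal.any; doWork stands in for the OpenAI image request):

// Skeleton of the abort-listener lifecycle shared by the generate and edit tools.
async function runWithAbort(doWork, runnableConfig) {
  let derivedSignal = null;
  let abortHandler = null;
  try {
    if (runnableConfig?.signal) {
      derivedSignal = AbortSignal.any([runnableConfig.signal]);
      abortHandler = () => console.debug('[ImageGenOAI] Image generation aborted');
      derivedSignal.addEventListener('abort', abortHandler, { once: true });
    }
    return await doWork(derivedSignal);
  } finally {
    // Runs on success, error, and abort alike, so the listener never leaks.
    if (abortHandler && derivedSignal) {
      derivedSignal.removeEventListener('abort', abortHandler);
    }
  }
}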
api/cache/banViolation.js (vendored)
@@ -1,7 +1,8 @@
const { logger } = require('@librechat/data-schemas');
const { isEnabled, math } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled, math, removePorts } = require('~/server/utils');
const { deleteAllUserSessions } = require('~/models');
const { removePorts } = require('~/server/utils');
const getLogStores = require('./getLogStores');

const { BAN_VIOLATIONS, BAN_INTERVAL } = process.env ?? {};

api/cache/getLogStores.js (vendored)
@@ -1,7 +1,7 @@
const { Keyv } = require('keyv');
const { isEnabled, math } = require('@librechat/api');
const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { isEnabled, math } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
const keyvMongo = require('./keyvMongo');
@@ -1,4 +1,6 @@
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const getCustomConfig = require('~/server/services/Config/loadCustomConfig');
const { getMessages, deleteMessages } = require('./Message');
const { Conversation } = require('~/db/models');

@@ -98,10 +100,15 @@ module.exports = {
      update.conversationId = newConversationId;
    }

    if (req.body.isTemporary) {
      const expiredAt = new Date();
      expiredAt.setDate(expiredAt.getDate() + 30);
      update.expiredAt = expiredAt;
    if (req?.body?.isTemporary) {
      try {
        const customConfig = await getCustomConfig();
        update.expiredAt = createTempChatExpirationDate(customConfig);
      } catch (err) {
        logger.error('Error creating temporary chat expiration date:', err);
        logger.info(`---\`saveConvo\` context: ${metadata?.context}`);
        update.expiredAt = null;
      }
    } else {
      update.expiredAt = null;
    }
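saveConvo (here) and saveMessage (below) both replace a hard-coded 30-day window with the shared createTempChatExpirationDate helper, so retention becomes configurable in one place. Assuming the helper keeps the old 30-day default when the config sets nothing, its effective behavior is roughly:

// Rough sketch of the behavior being centralized. The actual helper lives in
// @librechat/api; the config field name below is an assumption, not shown in
// this diff.
function sketchTempChatExpiration(customConfig) {
  const retentionHours = customConfig?.interface?.temporaryChatRetention ?? 24 * 30; // assumed field
  const expiredAt = new Date();
  expiredAt.setHours(expiredAt.getHours() + retentionHours);
  return expiredAt;
}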
@@ -1,5 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { EToolResources } = require('librechat-data-provider');
const { EToolResources, FileContext } = require('librechat-data-provider');
const { File } = require('~/db/models');

/**
@@ -32,19 +32,19 @@ const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => {
 * @returns {Promise<Array<MongoFile>>} Files that match the criteria
 */
const getToolFilesByIds = async (fileIds, toolResourceSet) => {
  if (!fileIds || !fileIds.length) {
  if (!fileIds || !fileIds.length || !toolResourceSet?.size) {
    return [];
  }

  try {
    const filter = {
      file_id: { $in: fileIds },
      $or: [],
    };

    if (toolResourceSet.size) {
      filter.$or = [];
    if (toolResourceSet.has(EToolResources.ocr)) {
      filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
    }

    if (toolResourceSet.has(EToolResources.file_search)) {
      filter.$or.push({ embedded: true });
    }
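With both resources in the set, the query built above ends up shaped like this (illustrative values; FileContext.agents is assumed to serialize to the string 'agents'):

// Example filter for fileIds ['a', 'b'] when toolResourceSet contains both
// EToolResources.ocr and EToolResources.file_search — a file matches if it
// satisfies at least one $or branch:
const exampleFilter = {
  file_id: { $in: ['a', 'b'] },
  $or: [
    { text: { $exists: true, $ne: null }, context: 'agents' }, // ocr
    { embedded: true }, // file_search
  ],
};

The added guard (!toolResourceSet?.size) also returns early before the query is built, which matters because MongoDB rejects a filter whose $or array is empty.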
@@ -1,5 +1,7 @@
const { z } = require('zod');
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const getCustomConfig = require('~/server/services/Config/loadCustomConfig');
const { Message } = require('~/db/models');

const idSchema = z.string().uuid();
@@ -54,9 +56,14 @@ async function saveMessage(req, params, metadata) {
    };

    if (req?.body?.isTemporary) {
      const expiredAt = new Date();
      expiredAt.setDate(expiredAt.getDate() + 30);
      update.expiredAt = expiredAt;
      try {
        const customConfig = await getCustomConfig();
        update.expiredAt = createTempChatExpirationDate(customConfig);
      } catch (err) {
        logger.error('Error creating temporary chat expiration date:', err);
        logger.info(`---\`saveMessage\` context: ${metadata?.context}`);
        update.expiredAt = null;
      }
    } else {
      update.expiredAt = null;
    }
@@ -48,14 +48,13 @@
    "@langchain/google-genai": "^0.2.13",
    "@langchain/google-vertexai": "^0.2.13",
    "@langchain/textsplitters": "^0.1.0",
    "@librechat/agents": "^2.4.41",
    "@librechat/agents": "^2.4.46",
    "@librechat/api": "*",
    "@librechat/data-schemas": "*",
    "@node-saml/passport-saml": "^5.0.0",
    "@waylaidwanderer/fetch-event-source": "^3.0.1",
    "axios": "^1.8.2",
    "bcryptjs": "^2.4.3",
    "cohere-ai": "^7.9.1",
    "compression": "^1.7.4",
    "connect-redis": "^7.1.0",
    "cookie": "^0.7.2",
@@ -169,9 +169,6 @@ function disposeClient(client) {
    client.isGenerativeModel = null;
  }
  // Properties specific to OpenAIClient
  if (client.ChatGPTClient) {
    client.ChatGPTClient = null;
  }
  if (client.completionsUrl) {
    client.completionsUrl = null;
  }
@@ -1,17 +1,17 @@
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const openIdClient = require('openid-client');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
  registerUser,
  resetPassword,
  setAuthTokens,
  requestPasswordReset,
  setOpenIDAuthTokens,
  resetPassword,
  setAuthTokens,
  registerUser,
} = require('~/server/services/AuthService');
const { findUser, getUserById, deleteAllUserSessions, findSession } = require('~/models');
const { getOpenIdConfig } = require('~/strategies');
const { isEnabled } = require('~/server/utils');

const registrationController = async (req, res) => {
  try {
@@ -1,3 +1,5 @@
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { getResponseSender } = require('librechat-data-provider');
const {
  handleAbortError,
@@ -10,9 +12,8 @@ const {
  clientRegistry,
  requestDataMap,
} = require('~/server/cleanup');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { createOnProgress } = require('~/server/utils');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');

const EditController = async (req, res, next, initializeClient) => {
  let {
@@ -198,7 +199,7 @@ const EditController = async (req, res, next, initializeClient) => {
  const finalUserMessage = reqDataContext.userMessage;
  const finalResponseMessage = { ...response };

  sendMessage(res, {
  sendEvent(res, {
    final: true,
    conversation,
    title: conversation.title,
@@ -9,6 +9,7 @@ const {
} = require('@librechat/api');
const {
  Callback,
  Providers,
  GraphEvents,
  formatMessage,
  formatAgentMessages,
@@ -31,22 +32,29 @@ const {
} = require('librechat-data-provider');
const { DynamicStructuredTool } = require('@langchain/core/tools');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const {
  getCustomEndpointConfig,
  createGetMCPAuthMap,
  checkCapability,
} = require('~/server/services/Config');
const { createGetMCPAuthMap, checkCapability } = require('~/server/services/Config');
const { addCacheControl, createContextHandlers } = require('~/app/clients/prompts');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getFormattedMemories, deleteMemory, setMemory } = require('~/models');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const { getProviderConfig } = require('~/server/services/Endpoints');
const { checkAccess } = require('~/server/middleware/roles/access');
const BaseClient = require('~/app/clients/BaseClient');
const { loadAgent } = require('~/models/Agent');
const { getMCPManager } = require('~/config');

const omitTitleOptions = new Set([
  'stream',
  'thinking',
  'streaming',
  'clientOptions',
  'thinkingConfig',
  'thinkingBudget',
  'includeThoughts',
  'maxOutputTokens',
]);

/**
 * @param {ServerRequest} req
 * @param {Agent} agent
@@ -677,7 +685,7 @@ class AgentClient extends BaseClient {
        hide_sequential_outputs: this.options.agent.hide_sequential_outputs,
        user: this.options.req.user,
      },
      recursionLimit: agentsEConfig?.recursionLimit,
      recursionLimit: agentsEConfig?.recursionLimit ?? 25,
      signal: abortController.signal,
      streamMode: 'values',
      version: 'v2',
@@ -983,23 +991,26 @@ class AgentClient extends BaseClient {
      throw new Error('Run not initialized');
    }
    const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
    const endpoint = this.options.agent.endpoint;
    const { req, res } = this.options;
    const { req, res, agent } = this.options;
    const endpoint = agent.endpoint;

    /** @type {import('@librechat/agents').ClientOptions} */
    let clientOptions = {
      maxTokens: 75,
      model: agent.model_parameters.model,
    };
    let endpointConfig = req.app.locals[endpoint];

    const { getOptions, overrideProvider, customEndpointConfig } =
      await getProviderConfig(endpoint);

    /** @type {TEndpoint | undefined} */
    const endpointConfig = req.app.locals[endpoint] ?? customEndpointConfig;
    if (!endpointConfig) {
      try {
        endpointConfig = await getCustomEndpointConfig(endpoint);
      } catch (err) {
        logger.error(
          '[api/server/controllers/agents/client.js #titleConvo] Error getting custom endpoint config',
          err,
        );
      }
      logger.warn(
        '[api/server/controllers/agents/client.js #titleConvo] Error getting endpoint config',
      );
    }

    if (
      endpointConfig &&
      endpointConfig.titleModel &&
@@ -1007,30 +1018,50 @@ class AgentClient extends BaseClient {
    ) {
      clientOptions.model = endpointConfig.titleModel;
    }

    const options = await getOptions({
      req,
      res,
      optionsOnly: true,
      overrideEndpoint: endpoint,
      overrideModel: clientOptions.model,
      endpointOption: { model_parameters: clientOptions },
    });

    let provider = options.provider ?? overrideProvider ?? agent.provider;
    if (
      endpoint === EModelEndpoint.azureOpenAI &&
      clientOptions.model &&
      this.options.agent.model_parameters.model !== clientOptions.model
      options.llmConfig?.azureOpenAIApiInstanceName == null
    ) {
      clientOptions =
        (
          await initOpenAI({
            req,
            res,
            optionsOnly: true,
            overrideModel: clientOptions.model,
            overrideEndpoint: endpoint,
            endpointOption: {
              model_parameters: clientOptions,
            },
          })
        )?.llmConfig ?? clientOptions;
      provider = Providers.OPENAI;
    }
    if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {

    /** @type {import('@librechat/agents').ClientOptions} */
    clientOptions = { ...options.llmConfig };
    if (options.configOptions) {
      clientOptions.configuration = options.configOptions;
    }

    // Ensure maxTokens is set for models outside the o-series (o1, o3, ...)
    if (!/\b(o\d)\b/i.test(clientOptions.model) && !clientOptions.maxTokens) {
      clientOptions.maxTokens = 75;
    } else if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
      delete clientOptions.maxTokens;
    }

    clientOptions = Object.assign(
      Object.fromEntries(
        Object.entries(clientOptions).filter(([key]) => !omitTitleOptions.has(key)),
      ),
    );

    if (provider === Providers.GOOGLE) {
      clientOptions.json = true;
    }

    try {
      const titleResult = await this.run.generateTitle({
        provider,
        inputText: text,
        contentParts: this.contentParts,
        clientOptions,
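The rebuilt title flow takes the provider's llmConfig wholesale, then strips options that make no sense for a one-shot title completion via omitTitleOptions. Isolated, that filtering step is a plain object rebuild (illustrative values):

// Same technique as the omitTitleOptions filter above, shown standalone:
const omit = new Set(['stream', 'streaming', 'thinking', 'clientOptions']);
const llmConfig = { model: 'gpt-4o-mini', maxTokens: 75, stream: true };
const titleOptions = Object.fromEntries(
  Object.entries(llmConfig).filter(([key]) => !omit.has(key)),
);
// → { model: 'gpt-4o-mini', maxTokens: 75 }

Note that the Object.assign wrapper in the hunk is effectively a no-op with a single argument; Object.fromEntries alone already yields the filtered object.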
@@ -1048,8 +1079,10 @@ class AgentClient extends BaseClient {
        let input_tokens, output_tokens;

        if (item.usage) {
          input_tokens = item.usage.input_tokens || item.usage.inputTokens;
          output_tokens = item.usage.output_tokens || item.usage.outputTokens;
          input_tokens =
            item.usage.prompt_tokens || item.usage.input_tokens || item.usage.inputTokens;
          output_tokens =
            item.usage.completion_tokens || item.usage.output_tokens || item.usage.outputTokens;
        } else if (item.tokenUsage) {
          input_tokens = item.tokenUsage.promptTokens;
          output_tokens = item.tokenUsage.completionTokens;
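The widened fallback chain accepts OpenAI-style snake_case (prompt_tokens/completion_tokens), generic snake_case (input_tokens), and camelCase (inputTokens) usage shapes. A small normalizer showing the same precedence order:

// Mirrors the fallback order in the hunk above; returns an empty object when
// no recognized usage shape is present.
function normalizeUsage(item) {
  if (item.usage) {
    return {
      input_tokens: item.usage.prompt_tokens || item.usage.input_tokens || item.usage.inputTokens,
      output_tokens:
        item.usage.completion_tokens || item.usage.output_tokens || item.usage.outputTokens,
    };
  }
  if (item.tokenUsage) {
    return {
      input_tokens: item.tokenUsage.promptTokens,
      output_tokens: item.tokenUsage.completionTokens,
    };
  }
  return {};
}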
@@ -1,10 +1,10 @@
// errorHandler.js
const { logger } = require('~/config');
const getLogStores = require('~/cache/getLogStores');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { sendResponse } = require('~/server/middleware/error');
const { recordUsage } = require('~/server/services/Threads');
const { getConvo } = require('~/models/Conversation');
const { sendResponse } = require('~/server/utils');
const getLogStores = require('~/cache/getLogStores');

/**
 * @typedef {Object} ErrorHandlerContext
@@ -75,7 +75,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
    } else if (/Files.*are invalid/.test(error.message)) {
      const errorMessage = `Files are invalid, or may not have uploaded yet.${
        endpoint === 'azureAssistants'
          ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
          ? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload."
          : ''
      }`;
      return sendResponse(req, res, messageData, errorMessage);
@@ -1,106 +0,0 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { resolveHeaders } = require('librechat-data-provider');
const { createLLM } = require('~/app/clients/llm');

/**
 * Initializes and returns a Large Language Model (LLM) instance.
 *
 * @param {Object} options - Configuration options for the LLM.
 * @param {string} options.model - The model identifier.
 * @param {string} options.modelName - The specific name of the model.
 * @param {number} options.temperature - The temperature setting for the model.
 * @param {number} options.presence_penalty - The presence penalty for the model.
 * @param {number} options.frequency_penalty - The frequency penalty for the model.
 * @param {number} options.max_tokens - The maximum number of tokens for the model output.
 * @param {boolean} options.streaming - Whether to use streaming for the model output.
 * @param {Object} options.context - The context for the conversation.
 * @param {number} options.tokenBuffer - The token buffer size.
 * @param {number} options.initialMessageCount - The initial message count.
 * @param {string} options.conversationId - The ID of the conversation.
 * @param {string} options.user - The user identifier.
 * @param {string} options.langchainProxy - The langchain proxy URL.
 * @param {boolean} options.useOpenRouter - Whether to use OpenRouter.
 * @param {Object} options.options - Additional options.
 * @param {Object} options.options.headers - Custom headers for the request.
 * @param {string} options.options.proxy - Proxy URL.
 * @param {Object} options.options.req - The request object.
 * @param {Object} options.options.res - The response object.
 * @param {boolean} options.options.debug - Whether to enable debug mode.
 * @param {string} options.apiKey - The API key for authentication.
 * @param {Object} options.azure - Azure-specific configuration.
 * @param {Object} options.abortController - The AbortController instance.
 * @returns {Object} The initialized LLM instance.
 */
function initializeLLM(options) {
  const {
    model,
    modelName,
    temperature,
    presence_penalty,
    frequency_penalty,
    max_tokens,
    streaming,
    user,
    langchainProxy,
    useOpenRouter,
    options: { headers, proxy },
    apiKey,
    azure,
  } = options;

  const modelOptions = {
    modelName: modelName || model,
    temperature,
    presence_penalty,
    frequency_penalty,
    user,
  };

  if (max_tokens) {
    modelOptions.max_tokens = max_tokens;
  }

  const configOptions = {};

  if (langchainProxy) {
    configOptions.basePath = langchainProxy;
  }

  if (useOpenRouter) {
    configOptions.basePath = 'https://openrouter.ai/api/v1';
    configOptions.baseOptions = {
      headers: {
        'HTTP-Referer': 'https://librechat.ai',
        'X-Title': 'LibreChat',
      },
    };
  }

  if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
    configOptions.baseOptions = {
      headers: resolveHeaders({
        ...headers,
        ...configOptions?.baseOptions?.headers,
      }),
    };
  }

  if (proxy) {
    configOptions.httpAgent = new HttpsProxyAgent(proxy);
    configOptions.httpsAgent = new HttpsProxyAgent(proxy);
  }

  const llm = createLLM({
    modelOptions,
    configOptions,
    openAIApiKey: apiKey,
    azure,
    streaming,
  });

  return llm;
}

module.exports = {
  initializeLLM,
};
@@ -1,3 +1,5 @@
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { Constants } = require('librechat-data-provider');
const {
  handleAbortError,
@@ -5,9 +7,7 @@ const {
  cleanupAbortController,
} = require('~/server/middleware');
const { disposeClient, clientRegistry, requestDataMap } = require('~/server/cleanup');
const { sendMessage } = require('~/server/utils');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');

const AgentController = async (req, res, next, initializeClient, addTitle) => {
  let {
@@ -206,7 +206,7 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
  // Create a new response object with minimal copies
  const finalResponse = { ...response };

  sendMessage(res, {
  sendEvent(res, {
    final: true,
    conversation,
    title: conversation.title,
@@ -1,4 +1,7 @@
const { v4 } = require('uuid');
const { sleep } = require('@librechat/agents');
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
  Time,
  Constants,
@@ -19,20 +22,20 @@ const {
  addThreadMetadata,
  saveAssistantMessage,
} = require('~/server/services/Threads');
const { sendResponse, sendMessage, sleep, countTokens } = require('~/server/utils');
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
const validateAuthor = require('~/server/middleware/assistants/validateAuthor');
const { formatMessage, createVisionPrompt } = require('~/app/clients/prompts');
const { createRun, StreamRunManager } = require('~/server/services/Runs');
const { addTitle } = require('~/server/services/Endpoints/assistants');
const { createRunBody } = require('~/server/services/createRunBody');
const { sendResponse } = require('~/server/middleware/error');
const { getTransactions } = require('~/models/Transaction');
const { checkBalance } = require('~/models/balanceMethods');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { countTokens } = require('~/server/utils');
const { getModelMaxTokens } = require('~/utils');
const { getOpenAIClient } = require('./helpers');
const { logger } = require('~/config');

/**
 * @route POST /
@@ -471,7 +474,7 @@ const chatV1 = async (req, res) => {
    await Promise.all(promises);

    const sendInitialResponse = () => {
      sendMessage(res, {
      sendEvent(res, {
        sync: true,
        conversationId,
        // messages: previousMessages,
@@ -587,7 +590,7 @@ const chatV1 = async (req, res) => {
      iconURL: endpointOption.iconURL,
    };

    sendMessage(res, {
    sendEvent(res, {
      final: true,
      conversation,
      requestMessage: {
@@ -1,4 +1,7 @@
const { v4 } = require('uuid');
const { sleep } = require('@librechat/agents');
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
  Time,
  Constants,
@@ -22,15 +25,14 @@ const { createErrorHandler } = require('~/server/controllers/assistants/errors')
const validateAuthor = require('~/server/middleware/assistants/validateAuthor');
const { createRun, StreamRunManager } = require('~/server/services/Runs');
const { addTitle } = require('~/server/services/Endpoints/assistants');
const { sendMessage, sleep, countTokens } = require('~/server/utils');
const { createRunBody } = require('~/server/services/createRunBody');
const { getTransactions } = require('~/models/Transaction');
const { checkBalance } = require('~/models/balanceMethods');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { countTokens } = require('~/server/utils');
const { getModelMaxTokens } = require('~/utils');
const { getOpenAIClient } = require('./helpers');
const { logger } = require('~/config');

/**
 * @route POST /
@@ -309,7 +311,7 @@ const chatV2 = async (req, res) => {
    await Promise.all(promises);

    const sendInitialResponse = () => {
      sendMessage(res, {
      sendEvent(res, {
        sync: true,
        conversationId,
        // messages: previousMessages,
@@ -432,7 +434,7 @@ const chatV2 = async (req, res) => {
      iconURL: endpointOption.iconURL,
    };

    sendMessage(res, {
    sendEvent(res, {
      final: true,
      conversation,
      requestMessage: {
@@ -1,10 +1,10 @@
// errorHandler.js
const { sendResponse } = require('~/server/utils');
const { logger } = require('~/config');
const getLogStores = require('~/cache/getLogStores');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, ViolationTypes, ContentTypes } = require('librechat-data-provider');
const { getConvo } = require('~/models/Conversation');
const { recordUsage, checkMessageGaps } = require('~/server/services/Threads');
const { sendResponse } = require('~/server/middleware/error');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');

/**
 * @typedef {Object} ErrorHandlerContext
@@ -78,7 +78,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
    } else if (/Files.*are invalid/.test(error.message)) {
      const errorMessage = `Files are invalid, or may not have uploaded yet.${
        endpoint === 'azureAssistants'
          ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
          ? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload."
          : ''
      }`;
      return sendResponse(req, res, messageData, errorMessage);
@@ -1,13 +1,13 @@
// abortMiddleware.js
const { logger } = require('@librechat/data-schemas');
const { countTokens, isEnabled, sendEvent } = require('@librechat/api');
const { isAssistantsEndpoint, ErrorTypes } = require('librechat-data-provider');
const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils');
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
const clearPendingReq = require('~/cache/clearPendingReq');
const { sendError } = require('~/server/middleware/error');
const { spendTokens } = require('~/models/spendTokens');
const abortControllers = require('./abortControllers');
const { saveMessage, getConvo } = require('~/models');
const { abortRun } = require('./abortRun');
const { logger } = require('~/config');

const abortDataMap = new WeakMap();

@@ -101,7 +101,7 @@ async function abortMessage(req, res) {
  cleanupAbortController(abortKey);

  if (res.headersSent && finalEvent) {
    return sendMessage(res, finalEvent);
    return sendEvent(res, finalEvent);
  }

  res.setHeader('Content-Type', 'application/json');
@@ -174,7 +174,7 @@ const createAbortController = (req, res, getAbortData, getReqData) => {
   * @param {string} responseMessageId
   */
  const onStart = (userMessage, responseMessageId) => {
    sendMessage(res, { message: userMessage, created: true });
    sendEvent(res, { message: userMessage, created: true });

    const abortKey = userMessage?.conversationId ?? req.user.id;
    getReqData({ abortKey });
@@ -1,11 +1,11 @@
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, RunStatus, isUUID } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { checkMessageGaps, recordUsage } = require('~/server/services/Threads');
const { deleteMessages } = require('~/models/Message');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { sendMessage } = require('~/server/utils');
const { logger } = require('~/config');

const three_minutes = 1000 * 60 * 3;

@@ -34,7 +34,7 @@ async function abortRun(req, res) {
  const [thread_id, run_id] = runValues.split(':');

  if (!run_id) {
    logger.warn('[abortRun] Couldn\'t find run for cancel request', { thread_id });
    logger.warn("[abortRun] Couldn't find run for cancel request", { thread_id });
    return res.status(204).send({ message: 'Run not found' });
  } else if (run_id === 'cancelled') {
    logger.warn('[abortRun] Run already cancelled', { thread_id });
@@ -93,7 +93,7 @@ async function abortRun(req, res) {
  };

  if (res.headersSent && finalEvent) {
    return sendMessage(res, finalEvent);
    return sendEvent(res, finalEvent);
  }

  res.json(finalEvent);
@@ -7,7 +7,6 @@ const {
} = require('librechat-data-provider');
const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
const assistants = require('~/server/services/Endpoints/assistants');
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
const { processFiles } = require('~/server/services/Files/process');
const anthropic = require('~/server/services/Endpoints/anthropic');
const bedrock = require('~/server/services/Endpoints/bedrock');
@@ -25,7 +24,6 @@ const buildFunction = {
  [EModelEndpoint.bedrock]: bedrock.buildOptions,
  [EModelEndpoint.azureOpenAI]: openAI.buildOptions,
  [EModelEndpoint.anthropic]: anthropic.buildOptions,
  [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
  [EModelEndpoint.assistants]: assistants.buildOptions,
  [EModelEndpoint.azureAssistants]: azureAssistants.buildOptions,
};
@@ -60,15 +58,6 @@ async function buildEndpointOption(req, res, next) {
    return handleError(res, { text: 'Model spec mismatch' });
  }

  if (
    currentModelSpec.preset.endpoint !== EModelEndpoint.gptPlugins &&
    currentModelSpec.preset.tools
  ) {
    return handleError(res, {
      text: `Only the "${EModelEndpoint.gptPlugins}" endpoint can have tools defined in the preset`,
    });
  }

  try {
    currentModelSpec.preset.spec = spec;
    if (currentModelSpec.iconURL != null && currentModelSpec.iconURL !== '') {
@@ -1,6 +1,7 @@
const crypto = require('crypto');
const { sendEvent } = require('@librechat/api');
const { getResponseSender, Constants } = require('librechat-data-provider');
const { sendMessage, sendError } = require('~/server/utils');
const { sendError } = require('~/server/middleware/error');
const { saveMessage } = require('~/models');

/**
@@ -36,7 +37,7 @@ const denyRequest = async (req, res, errorMessage) => {
    isCreatedByUser: true,
    text,
  };
  sendMessage(res, { message: userMessage, created: true });
  sendEvent(res, { message: userMessage, created: true });

  const shouldSaveMessage = _convoId && parentMessageId && parentMessageId !== Constants.NO_PARENT;
@@ -1,31 +1,9 @@
const crypto = require('crypto');
const { logger } = require('@librechat/data-schemas');
const { parseConvo } = require('librechat-data-provider');
const { sendEvent, handleError } = require('@librechat/api');
const { saveMessage, getMessages } = require('~/models/Message');
const { getConvo } = require('~/models/Conversation');
const { logger } = require('~/config');

/**
 * Sends error data in Server Sent Events format and ends the response.
 * @param {object} res - The server response.
 * @param {string} message - The error message.
 */
const handleError = (res, message) => {
  res.write(`event: error\ndata: ${JSON.stringify(message)}\n\n`);
  res.end();
};

/**
 * Sends message data in Server Sent Events format.
 * @param {Express.Response} res - The server response.
 * @param {string | Object} message - The message to be sent.
 * @param {'message' | 'error' | 'cancel'} event - [Optional] The type of event. Default is 'message'.
 */
const sendMessage = (res, message, event = 'message') => {
  if (typeof message === 'string' && message.length === 0) {
    return;
  }
  res.write(`event: ${event}\ndata: ${JSON.stringify(message)}\n\n`);
};

/**
 * Processes an error with provided options, saves the error message and sends a corresponding SSE response
@@ -91,7 +69,7 @@ const sendError = async (req, res, options, callback) => {
    convo = parseConvo(errorMessage);
  }

  return sendMessage(res, {
  return sendEvent(res, {
    final: true,
    requestMessage: query?.[0] ? query[0] : requestMessage,
    responseMessage: errorMessage,
@@ -120,12 +98,10 @@ const sendResponse = (req, res, data, errorMessage) => {
  if (errorMessage) {
    return sendError(req, res, { ...data, text: errorMessage });
  }
  return sendMessage(res, data);
  return sendEvent(res, data);
};

module.exports = {
  sendResponse,
  handleError,
  sendMessage,
  sendError,
  sendResponse,
};
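The deleted helpers document the SSE wire format (event: <name>, then data: <json>, then a blank line) that the replacement sendEvent from @librechat/api is expected to keep emitting. Assuming the contract is unchanged, a browser client consumes such a stream like this (the endpoint URL is illustrative only):

// Named SSE events as written by the old sendMessage / new sendEvent:
const es = new EventSource('/api/example-stream'); // hypothetical endpoint
es.addEventListener('message', (e) => {
  const payload = JSON.parse(e.data);
  if (payload.final) {
    es.close();
  }
});
es.addEventListener('error', (e) => {
  // Fires for both server-sent `event: error` frames (with data) and
  // transport-level failures (without data).
  if (e.data) {
    console.error('stream error:', JSON.parse(e.data));
  }
});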
@@ -1,4 +1,5 @@
const express = require('express');
const { addTool } = require('@librechat/api');
const { callTool, verifyToolAuth, getToolCalls } = require('~/server/controllers/tools');
const { getAvailableTools } = require('~/server/controllers/PluginController');
const { toolCallLimiter } = require('~/server/middleware/limiters');
@@ -36,4 +37,12 @@ router.get('/:toolId/auth', verifyToolAuth);
 */
router.post('/:toolId/call', toolCallLimiter, callTool);

/**
 * Add a new tool to the system
 * @route POST /agents/tools/add
 * @param {object} req.body - Request body containing tool data
 * @returns {object} Created tool object
 */
router.post('/add', addTool);

module.exports = router;
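Neither the mount path nor the request schema for addTool is shown in this diff; assuming the router is mounted under /api/agents/tools like its sibling routes, a call could look like (body fields are illustrative, validated by addTool in @librechat/api):

// Illustrative only — path prefix and body shape are assumptions:
const res = await fetch('/api/agents/tools/add', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ name: 'my_tool', description: 'Example tool' }),
});
const createdTool = await res.json();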
@@ -1,207 +0,0 @@
const express = require('express');
const { getResponseSender } = require('librechat-data-provider');
const {
  setHeaders,
  moderateText,
  validateModel,
  handleAbortError,
  validateEndpoint,
  buildEndpointOption,
  createAbortController,
} = require('~/server/middleware');
const { sendMessage, createOnProgress, formatSteps, formatAction } = require('~/server/utils');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, updateMessage } = require('~/models');
const { validateTools } = require('~/app');
const { logger } = require('~/config');

const router = express.Router();

router.use(moderateText);

router.post(
  '/',
  validateEndpoint,
  validateModel,
  buildEndpointOption,
  setHeaders,
  async (req, res) => {
    let {
      text,
      generation,
      endpointOption,
      conversationId,
      responseMessageId,
      isContinued = false,
      parentMessageId = null,
      overrideParentMessageId = null,
    } = req.body;

    logger.debug('[/edit/gptPlugins]', {
      text,
      generation,
      isContinued,
      conversationId,
      ...endpointOption,
    });

    let userMessage;
    let userMessagePromise;
    let promptTokens;
    const sender = getResponseSender({
      ...endpointOption,
      model: endpointOption.modelOptions.model,
    });
    const userMessageId = parentMessageId;
    const user = req.user.id;

    const plugin = {
      loading: true,
      inputs: [],
      latest: null,
      outputs: null,
    };

    const getReqData = (data = {}) => {
      for (let key in data) {
        if (key === 'userMessage') {
          userMessage = data[key];
        } else if (key === 'userMessagePromise') {
          userMessagePromise = data[key];
        } else if (key === 'responseMessageId') {
          responseMessageId = data[key];
        } else if (key === 'promptTokens') {
          promptTokens = data[key];
        }
      }
    };

    const {
      onProgress: progressCallback,
      sendIntermediateMessage,
      getPartialText,
    } = createOnProgress({
      generation,
      onProgress: () => {
        if (plugin.loading === true) {
          plugin.loading = false;
        }
      },
    });

    const onChainEnd = (data) => {
      let { intermediateSteps: steps } = data;
      plugin.outputs = steps && steps[0].action ? formatSteps(steps) : 'An error occurred.';
      plugin.loading = false;
      saveMessage(
        req,
        { ...userMessage, user },
        { context: 'api/server/routes/ask/gptPlugins.js - onChainEnd' },
      );
      sendIntermediateMessage(res, {
        plugin,
        parentMessageId: userMessage.messageId,
        messageId: responseMessageId,
      });
      // logger.debug('CHAIN END', plugin.outputs);
    };

    const getAbortData = () => ({
      sender,
      conversationId,
      userMessagePromise,
      messageId: responseMessageId,
      parentMessageId: overrideParentMessageId ?? userMessageId,
      text: getPartialText(),
      plugin: { ...plugin, loading: false },
      userMessage,
      promptTokens,
    });
    const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);

    try {
      endpointOption.tools = await validateTools(user, endpointOption.tools);
      const { client } = await initializeClient({ req, res, endpointOption });

      const onAgentAction = (action, start = false) => {
        const formattedAction = formatAction(action);
        plugin.inputs.push(formattedAction);
        plugin.latest = formattedAction.plugin;
        if (!start && !client.skipSaveUserMessage) {
          saveMessage(
            req,
            { ...userMessage, user },
            { context: 'api/server/routes/ask/gptPlugins.js - onAgentAction' },
          );
        }
        sendIntermediateMessage(res, {
          plugin,
          parentMessageId: userMessage.messageId,
          messageId: responseMessageId,
        });
        // logger.debug('PLUGIN ACTION', formattedAction);
      };

      let response = await client.sendMessage(text, {
        user,
        generation,
        isContinued,
        isEdited: true,
        conversationId,
        parentMessageId,
        responseMessageId,
        overrideParentMessageId,
        getReqData,
        onAgentAction,
        onChainEnd,
        onStart,
        ...endpointOption,
        progressCallback,
        progressOptions: {
          res,
          plugin,
          // parentMessageId: overrideParentMessageId || userMessageId,
        },
        abortController,
      });

      if (overrideParentMessageId) {
        response.parentMessageId = overrideParentMessageId;
      }

      logger.debug('[/edit/gptPlugins] CLIENT RESPONSE', response);

      const { conversation = {} } = await response.databasePromise;
      delete response.databasePromise;
      conversation.title =
        conversation && !conversation.title ? null : conversation?.title || 'New Chat';

      sendMessage(res, {
        title: conversation.title,
        final: true,
        conversation,
        requestMessage: userMessage,
        responseMessage: response,
      });
      res.end();

      response.plugin = { ...plugin, loading: false };
      await updateMessage(
        req,
        { ...response, user },
        { context: 'api/server/routes/edit/gptPlugins.js' },
      );
    } catch (error) {
      const partialText = getPartialText();
      handleAbortError(res, req, error, {
        partialText,
        conversationId,
        sender,
        messageId: responseMessageId,
        parentMessageId: userMessageId ?? parentMessageId,
      });
    }
  },
);

module.exports = router;
@@ -3,7 +3,6 @@ const openAI = require('./openAI');
const custom = require('./custom');
const google = require('./google');
const anthropic = require('./anthropic');
const gptPlugins = require('./gptPlugins');
const { isEnabled } = require('~/server/utils');
const { EModelEndpoint } = require('librechat-data-provider');
const {
@@ -39,7 +38,6 @@ if (isEnabled(LIMIT_MESSAGE_USER)) {
router.use(validateConvoAccess);

router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI);
router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins);
router.use(`/${EModelEndpoint.anthropic}`, anthropic);
router.use(`/${EModelEndpoint.google}`, google);
router.use(`/${EModelEndpoint.custom}`, custom);
@@ -283,7 +283,10 @@ router.post('/', async (req, res) => {
      message += ': ' + error.message;
    }

    if (error.message?.includes('Invalid file format')) {
    if (
      error.message?.includes('Invalid file format') ||
      error.message?.includes('No OCR result')
    ) {
      message = error.message;
    }
@@ -1,4 +1,7 @@
const { klona } = require('klona');
const { sleep } = require('@librechat/agents');
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
  StepTypes,
  RunStatus,
@@ -11,11 +14,10 @@ const {
} = require('librechat-data-provider');
const { retrieveAndProcessFile } = require('~/server/services/Files/process');
const { processRequiredActions } = require('~/server/services/ToolService');
const { createOnProgress, sendMessage, sleep } = require('~/server/utils');
const { RunManager, waitForRun } = require('~/server/services/Runs');
const { processMessages } = require('~/server/services/Threads');
const { createOnProgress } = require('~/server/utils');
const { TextStream } = require('~/app/clients');
const { logger } = require('~/config');

/**
 * Sorts, processes, and flattens messages to a single string.
@@ -64,7 +66,7 @@ async function createOnTextProgress({
    };

    logger.debug('Content data:', contentData);
    sendMessage(openai.res, contentData);
    sendEvent(openai.res, contentData);
  };
}
@@ -1,5 +1,6 @@
const { isUserProvided } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { isUserProvided, generateConfig } = require('~/server/utils');
const { generateConfig } = require('~/server/utils/handleText');

const {
  OPENAI_API_KEY: openAIApiKey,
@@ -40,6 +40,7 @@ async function getBalanceConfig() {
/**
 *
 * @param {string | EModelEndpoint} endpoint
 * @returns {Promise<TEndpoint | undefined>}
 */
const getCustomEndpointConfig = async (endpoint) => {
  const customConfig = await getCustomConfig();
@@ -1,18 +1,18 @@
const path = require('path');
const {
  CacheKeys,
  configSchema,
  EImageOutputType,
  validateSettingDefinitions,
  agentParamSettings,
  paramSettings,
} = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const loadYaml = require('~/utils/loadYaml');
const { logger } = require('~/config');
const axios = require('axios');
const yaml = require('js-yaml');
const keyBy = require('lodash/keyBy');
const { loadYaml } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
  CacheKeys,
  configSchema,
  paramSettings,
  EImageOutputType,
  agentParamSettings,
  validateSettingDefinitions,
} = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');

const projectRoot = path.resolve(__dirname, '..', '..', '..', '..');
const defaultConfigPath = path.resolve(projectRoot, 'librechat.yaml');
@@ -1,6 +1,9 @@
 jest.mock('axios');
 jest.mock('~/cache/getLogStores');
-jest.mock('~/utils/loadYaml');
+jest.mock('@librechat/api', () => ({
+  ...jest.requireActual('@librechat/api'),
+  loadYaml: jest.fn(),
+}));
 jest.mock('librechat-data-provider', () => {
   const actual = jest.requireActual('librechat-data-provider');
   return {

@@ -30,11 +33,22 @@ jest.mock('librechat-data-provider', () => {
   };
 });

+jest.mock('@librechat/data-schemas', () => {
+  return {
+    logger: {
+      info: jest.fn(),
+      warn: jest.fn(),
+      debug: jest.fn(),
+      error: jest.fn(),
+    },
+  };
+});
+
 const axios = require('axios');
+const { loadYaml } = require('@librechat/api');
+const { logger } = require('@librechat/data-schemas');
 const loadCustomConfig = require('./loadCustomConfig');
 const getLogStores = require('~/cache/getLogStores');
-const loadYaml = require('~/utils/loadYaml');
-const { logger } = require('~/config');

 describe('loadCustomConfig', () => {
   const mockSet = jest.fn();

@@ -11,30 +11,13 @@ const {
   replaceSpecialVars,
   providerEndpointMap,
 } = require('librechat-data-provider');
-const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
-const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
-const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
-const initCustom = require('~/server/services/Endpoints/custom/initialize');
-const initGoogle = require('~/server/services/Endpoints/google/initialize');
+const { getProviderConfig } = require('~/server/services/Endpoints');
 const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
-const { getCustomEndpointConfig } = require('~/server/services/Config');
 const { processFiles } = require('~/server/services/Files/process');
 const { getFiles, getToolFilesByIds } = require('~/models/File');
 const { getConvoFiles } = require('~/models/Conversation');
 const { getModelMaxTokens } = require('~/utils');

-const providerConfigMap = {
-  [Providers.XAI]: initCustom,
-  [Providers.OLLAMA]: initCustom,
-  [Providers.DEEPSEEK]: initCustom,
-  [Providers.OPENROUTER]: initCustom,
-  [EModelEndpoint.openAI]: initOpenAI,
-  [EModelEndpoint.google]: initGoogle,
-  [EModelEndpoint.azureOpenAI]: initOpenAI,
-  [EModelEndpoint.anthropic]: initAnthropic,
-  [EModelEndpoint.bedrock]: getBedrockOptions,
-};
-
 /**
  * @param {object} params
  * @param {ServerRequest} params.req

@@ -114,17 +97,9 @@ const initializeAgent = async ({
   })) ?? {};

   agent.endpoint = provider;
-  let getOptions = providerConfigMap[provider];
-  if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
-    agent.provider = provider.toLowerCase();
-    getOptions = providerConfigMap[agent.provider];
-  } else if (!getOptions) {
-    const customEndpointConfig = await getCustomEndpointConfig(provider);
-    if (!customEndpointConfig) {
-      throw new Error(`Provider ${provider} not supported`);
-    }
-    getOptions = initCustom;
-    agent.provider = Providers.OPENAI;
-  }
+  const { getOptions, overrideProvider } = await getProviderConfig(provider);
+  if (overrideProvider) {
+    agent.provider = overrideProvider;
+  }

   const _endpointOption =

@@ -23,7 +23,7 @@ const addTitle = async (req, { text, response, client }) => {
   let timeoutId;
   try {
     const timeoutPromise = new Promise((_, reject) => {
-      timeoutId = setTimeout(() => reject(new Error('Title generation timeout')), 25000);
+      timeoutId = setTimeout(() => reject(new Error('Title generation timeout')), 45000);
     }).catch((error) => {
       logger.error('Title error:', error);
     });

@@ -41,7 +41,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
     {
       reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
       proxy: PROXY ?? null,
-      modelOptions: endpointOption.model_parameters,
+      modelOptions: endpointOption?.model_parameters ?? {},
     },
     clientOptions,
   );

@@ -75,6 +75,7 @@ function getLLMConfig(apiKey, options = {}) {

   if (options.reverseProxyUrl) {
     requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
+    requestOptions.anthropicApiUrl = options.reverseProxyUrl;
   }

   return {

@@ -1,11 +1,45 @@
-const { anthropicSettings } = require('librechat-data-provider');
+const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
 const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');
+const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');

 jest.mock('https-proxy-agent', () => ({
   HttpsProxyAgent: jest.fn().mockImplementation((proxy) => ({ proxy })),
 }));

+jest.mock('./helpers', () => ({
+  checkPromptCacheSupport: jest.fn(),
+  getClaudeHeaders: jest.fn(),
+  configureReasoning: jest.fn((requestOptions) => requestOptions),
+}));
+
+jest.mock('librechat-data-provider', () => ({
+  anthropicSettings: {
+    model: { default: 'claude-3-opus-20240229' },
+    maxOutputTokens: { default: 4096, reset: jest.fn(() => 4096) },
+    thinking: { default: false },
+    promptCache: { default: false },
+    thinkingBudget: { default: null },
+  },
+  removeNullishValues: jest.fn((obj) => {
+    const result = {};
+    for (const key in obj) {
+      if (obj[key] !== null && obj[key] !== undefined) {
+        result[key] = obj[key];
+      }
+    }
+    return result;
+  }),
+}));
+
 describe('getLLMConfig', () => {
+  beforeEach(() => {
+    jest.clearAllMocks();
+    checkPromptCacheSupport.mockReturnValue(false);
+    getClaudeHeaders.mockReturnValue(undefined);
+    configureReasoning.mockImplementation((requestOptions) => requestOptions);
+    anthropicSettings.maxOutputTokens.reset.mockReturnValue(4096);
+  });
+
   it('should create a basic configuration with default values', () => {
     const result = getLLMConfig('test-api-key', { modelOptions: {} });

@@ -36,6 +70,7 @@ describe('getLLMConfig', () => {
     });

     expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'http://reverse-proxy');
+    expect(result.llmConfig).toHaveProperty('anthropicApiUrl', 'http://reverse-proxy');
   });

   it('should include topK and topP for non-Claude-3.7 models', () => {

@@ -65,6 +100,11 @@ describe('getLLMConfig', () => {
   });

   it('should NOT include topK and topP for Claude-3-7 models (hyphen notation)', () => {
+    configureReasoning.mockImplementation((requestOptions) => {
+      requestOptions.thinking = { type: 'enabled' };
+      return requestOptions;
+    });
+
     const result = getLLMConfig('test-api-key', {
       modelOptions: {
         model: 'claude-3-7-sonnet',

@@ -78,6 +118,11 @@ describe('getLLMConfig', () => {
   });

   it('should NOT include topK and topP for Claude-3.7 models (decimal notation)', () => {
+    configureReasoning.mockImplementation((requestOptions) => {
+      requestOptions.thinking = { type: 'enabled' };
+      return requestOptions;
+    });
+
     const result = getLLMConfig('test-api-key', {
       modelOptions: {
         model: 'claude-3.7-sonnet',

@@ -154,4 +199,160 @@ describe('getLLMConfig', () => {
     expect(result3.llmConfig).toHaveProperty('topK', 10);
     expect(result3.llmConfig).toHaveProperty('topP', 0.9);
   });
+
+  describe('Edge cases', () => {
+    it('should handle missing apiKey', () => {
+      const result = getLLMConfig(undefined, { modelOptions: {} });
+      expect(result.llmConfig).not.toHaveProperty('apiKey');
+    });
+
+    it('should handle empty modelOptions', () => {
+      expect(() => {
+        getLLMConfig('test-api-key', {});
+      }).toThrow("Cannot read properties of undefined (reading 'thinking')");
+    });
+
+    it('should handle no options parameter', () => {
+      expect(() => {
+        getLLMConfig('test-api-key');
+      }).toThrow("Cannot read properties of undefined (reading 'thinking')");
+    });
+
+    it('should handle temperature, stop sequences, and stream settings', () => {
+      const result = getLLMConfig('test-api-key', {
+        modelOptions: {
+          temperature: 0.7,
+          stop: ['\n\n', 'END'],
+          stream: false,
+        },
+      });
+
+      expect(result.llmConfig).toHaveProperty('temperature', 0.7);
+      expect(result.llmConfig).toHaveProperty('stopSequences', ['\n\n', 'END']);
+      expect(result.llmConfig).toHaveProperty('stream', false);
+    });
+
+    it('should handle maxOutputTokens when explicitly set to falsy value', () => {
+      anthropicSettings.maxOutputTokens.reset.mockReturnValue(8192);
+      const result = getLLMConfig('test-api-key', {
+        modelOptions: {
+          model: 'claude-3-opus',
+          maxOutputTokens: null,
+        },
+      });
+
+      expect(anthropicSettings.maxOutputTokens.reset).toHaveBeenCalledWith('claude-3-opus');
+      expect(result.llmConfig).toHaveProperty('maxTokens', 8192);
+    });
+
+    it('should handle both proxy and reverseProxyUrl', () => {
+      const result = getLLMConfig('test-api-key', {
+        modelOptions: {},
+        proxy: 'http://proxy:8080',
+        reverseProxyUrl: 'https://reverse-proxy.com',
+      });
+
+      expect(result.llmConfig.clientOptions).toHaveProperty('fetchOptions');
+      expect(result.llmConfig.clientOptions.fetchOptions).toHaveProperty('dispatcher');
+      expect(result.llmConfig.clientOptions.fetchOptions.dispatcher).toBeDefined();
+      expect(result.llmConfig.clientOptions.fetchOptions.dispatcher.constructor.name).toBe(
+        'ProxyAgent',
+      );
+      expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'https://reverse-proxy.com');
+      expect(result.llmConfig).toHaveProperty('anthropicApiUrl', 'https://reverse-proxy.com');
+    });
+
+    it('should handle prompt cache with supported model', () => {
+      checkPromptCacheSupport.mockReturnValue(true);
+      getClaudeHeaders.mockReturnValue({ 'anthropic-beta': 'prompt-caching-2024-07-31' });
+
+      const result = getLLMConfig('test-api-key', {
+        modelOptions: {
+          model: 'claude-3-5-sonnet',
+          promptCache: true,
+        },
+      });
+
+      expect(checkPromptCacheSupport).toHaveBeenCalledWith('claude-3-5-sonnet');
+      expect(getClaudeHeaders).toHaveBeenCalledWith('claude-3-5-sonnet', true);
+      expect(result.llmConfig.clientOptions.defaultHeaders).toEqual({
+        'anthropic-beta': 'prompt-caching-2024-07-31',
+      });
+    });
+
+    it('should handle thinking and thinkingBudget options', () => {
+      configureReasoning.mockImplementation((requestOptions, systemOptions) => {
+        if (systemOptions.thinking) {
+          requestOptions.thinking = { type: 'enabled' };
+        }
+        if (systemOptions.thinkingBudget) {
+          requestOptions.thinking = {
+            ...requestOptions.thinking,
+            budget_tokens: systemOptions.thinkingBudget,
+          };
+        }
+        return requestOptions;
+      });
+
+      getLLMConfig('test-api-key', {
+        modelOptions: {
+          model: 'claude-3-7-sonnet',
+          thinking: true,
+          thinkingBudget: 5000,
+        },
+      });
+
+      expect(configureReasoning).toHaveBeenCalledWith(
+        expect.any(Object),
+        expect.objectContaining({
+          thinking: true,
+          promptCache: false,
+          thinkingBudget: 5000,
+        }),
+      );
+    });
+
+    it('should remove system options from modelOptions', () => {
+      const modelOptions = {
+        model: 'claude-3-opus',
+        thinking: true,
+        promptCache: true,
+        thinkingBudget: 1000,
+        temperature: 0.5,
+      };
+
+      getLLMConfig('test-api-key', { modelOptions });

+      expect(modelOptions).not.toHaveProperty('thinking');
+      expect(modelOptions).not.toHaveProperty('promptCache');
+      expect(modelOptions).not.toHaveProperty('thinkingBudget');
+      expect(modelOptions).toHaveProperty('temperature', 0.5);
+    });
+
+    it('should handle all nullish values removal', () => {
+      removeNullishValues.mockImplementation((obj) => {
+        const cleaned = {};
+        Object.entries(obj).forEach(([key, value]) => {
+          if (value !== null && value !== undefined) {
+            cleaned[key] = value;
+          }
+        });
+        return cleaned;
+      });
+
+      const result = getLLMConfig('test-api-key', {
+        modelOptions: {
+          temperature: null,
+          topP: undefined,
+          topK: 0,
+          stop: [],
+        },
+      });
+
+      expect(result.llmConfig).not.toHaveProperty('temperature');
+      expect(result.llmConfig).not.toHaveProperty('topP');
+      expect(result.llmConfig).toHaveProperty('topK', 0);
+      expect(result.llmConfig).toHaveProperty('stopSequences', []);
+    });
+  });
 });

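Taken together, these expectations pin down the returned shape: sampling options land on `llmConfig` (with `stop` mapped to `stopSequences`), transport details live under `llmConfig.clientOptions`, and a reverse proxy sets both `clientOptions.baseURL` and `anthropicApiUrl`. A call consistent with the specs (values illustrative):

const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');

const { llmConfig } = getLLMConfig('test-api-key', {
  modelOptions: { model: 'claude-3-5-sonnet', temperature: 0.7, stop: ['END'] },
  reverseProxyUrl: 'https://reverse-proxy.example',
});
// Per the specs: llmConfig.stopSequences deep-equals ['END'], and both
// llmConfig.clientOptions.baseURL and llmConfig.anthropicApiUrl are the reverse proxy URL.
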
@@ -1,12 +1,7 @@
 const OpenAI = require('openai');
 const { HttpsProxyAgent } = require('https-proxy-agent');
-const { constructAzureURL, isUserProvided } = require('@librechat/api');
-const {
-  ErrorTypes,
-  EModelEndpoint,
-  resolveHeaders,
-  mapModelToAzureConfig,
-} = require('librechat-data-provider');
+const { constructAzureURL, isUserProvided, resolveHeaders } = require('@librechat/api');
+const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
 const {
   getUserKeyValues,
   getUserKeyExpiry,

@@ -114,11 +109,14 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie

   apiKey = azureOptions.azureOpenAIApiKey;
   opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
-  opts.defaultHeaders = resolveHeaders({
-    ...headers,
-    'api-key': apiKey,
-    'OpenAI-Beta': `assistants=${version}`,
-  });
+  opts.defaultHeaders = resolveHeaders(
+    {
+      ...headers,
+      'api-key': apiKey,
+      'OpenAI-Beta': `assistants=${version}`,
+    },
+    req.user,
+  );
   opts.model = azureOptions.azureOpenAIApiDeploymentName;

   if (initAppClient) {

@@ -64,7 +64,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {

   /** @type {BedrockClientOptions} */
   const requestOptions = {
-    model: overrideModel ?? endpointOption.model,
+    model: overrideModel ?? endpointOption?.model,
     region: BEDROCK_AWS_DEFAULT_REGION,
   };

@@ -76,7 +76,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {

   const llmConfig = bedrockOutputParser(
     bedrockInputParser.parse(
-      removeNullishValues(Object.assign(requestOptions, endpointOption.model_parameters)),
+      removeNullishValues(Object.assign(requestOptions, endpointOption?.model_parameters ?? {})),
     ),
   );

@@ -6,7 +6,7 @@ const {
   extractEnvVariable,
 } = require('librechat-data-provider');
 const { Providers } = require('@librechat/agents');
-const { getOpenAIConfig, createHandleLLMNewToken } = require('@librechat/api');
+const { getOpenAIConfig, createHandleLLMNewToken, resolveHeaders } = require('@librechat/api');
 const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
 const { getCustomEndpointConfig } = require('~/server/services/Config');
 const { fetchModels } = require('~/server/services/ModelService');

@@ -28,12 +28,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
   const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey);
   const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL);

-  let resolvedHeaders = {};
-  if (endpointConfig.headers && typeof endpointConfig.headers === 'object') {
-    Object.keys(endpointConfig.headers).forEach((key) => {
-      resolvedHeaders[key] = extractEnvVariable(endpointConfig.headers[key]);
-    });
-  }
+  let resolvedHeaders = resolveHeaders(endpointConfig.headers, req.user);

   if (CUSTOM_API_KEY.match(envVarRegex)) {
     throw new Error(`Missing API Key for ${endpoint}.`);

@@ -134,7 +129,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
   };

   if (optionsOnly) {
-    const modelOptions = endpointOption.model_parameters;
+    const modelOptions = endpointOption?.model_parameters ?? {};
     if (endpoint !== Providers.OLLAMA) {
       clientOptions = Object.assign(
         {

api/server/services/Endpoints/custom/initialize.spec.js (new file, 93 lines)
@@ -0,0 +1,93 @@
const initializeClient = require('./initialize');

jest.mock('@librechat/api', () => ({
  resolveHeaders: jest.fn(),
  getOpenAIConfig: jest.fn(),
  createHandleLLMNewToken: jest.fn(),
}));

jest.mock('librechat-data-provider', () => ({
  CacheKeys: { TOKEN_CONFIG: 'token_config' },
  ErrorTypes: { NO_USER_KEY: 'NO_USER_KEY', NO_BASE_URL: 'NO_BASE_URL' },
  envVarRegex: /\$\{([^}]+)\}/,
  FetchTokenConfig: {},
  extractEnvVariable: jest.fn((value) => value),
}));

jest.mock('@librechat/agents', () => ({
  Providers: { OLLAMA: 'ollama' },
}));

jest.mock('~/server/services/UserService', () => ({
  getUserKeyValues: jest.fn(),
  checkUserKeyExpiry: jest.fn(),
}));

jest.mock('~/server/services/Config', () => ({
  getCustomEndpointConfig: jest.fn().mockResolvedValue({
    apiKey: 'test-key',
    baseURL: 'https://test.com',
    headers: { 'x-user': '{{LIBRECHAT_USER_ID}}', 'x-email': '{{LIBRECHAT_USER_EMAIL}}' },
    models: { default: ['test-model'] },
  }),
}));

jest.mock('~/server/services/ModelService', () => ({
  fetchModels: jest.fn(),
}));

jest.mock('~/app/clients/OpenAIClient', () => {
  return jest.fn().mockImplementation(() => ({
    options: {},
  }));
});

jest.mock('~/server/utils', () => ({
  isUserProvided: jest.fn().mockReturnValue(false),
}));

jest.mock('~/cache/getLogStores', () =>
  jest.fn().mockReturnValue({
    get: jest.fn(),
  }),
);

describe('custom/initializeClient', () => {
  const mockRequest = {
    body: { endpoint: 'test-endpoint' },
    user: { id: 'user-123', email: 'test@example.com' },
    app: { locals: {} },
  };
  const mockResponse = {};

  beforeEach(() => {
    jest.clearAllMocks();
  });

  it('calls resolveHeaders with headers and user', async () => {
    const { resolveHeaders } = require('@librechat/api');
    await initializeClient({ req: mockRequest, res: mockResponse, optionsOnly: true });
    expect(resolveHeaders).toHaveBeenCalledWith(
      { 'x-user': '{{LIBRECHAT_USER_ID}}', 'x-email': '{{LIBRECHAT_USER_EMAIL}}' },
      { id: 'user-123', email: 'test@example.com' },
    );
  });

  it('throws if endpoint config is missing', async () => {
    const { getCustomEndpointConfig } = require('~/server/services/Config');
    getCustomEndpointConfig.mockResolvedValueOnce(null);
    await expect(
      initializeClient({ req: mockRequest, res: mockResponse, optionsOnly: true }),
    ).rejects.toThrow('Config not found for the test-endpoint custom endpoint.');
  });

  it('throws if user is missing', async () => {
    await expect(
      initializeClient({
        req: { ...mockRequest, user: undefined },
        res: mockResponse,
        optionsOnly: true,
      }),
    ).rejects.toThrow("Cannot read properties of undefined (reading 'id')");
  });
});

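The spec above pins the new contract: `resolveHeaders` now receives the raw header map plus the request user, so per-user placeholders like `{{LIBRECHAT_USER_ID}}` can be substituted at request time. A rough sketch of that substitution, assuming only the two placeholders exercised by the spec (illustrative, not the library's implementation):

// Hypothetical sketch of the substitution the spec implies.
function resolveHeadersSketch(headers = {}, user = {}) {
  const resolved = {};
  for (const [key, value] of Object.entries(headers)) {
    resolved[key] = String(value)
      .replace(/\{\{LIBRECHAT_USER_ID\}\}/g, user.id ?? '')
      .replace(/\{\{LIBRECHAT_USER_EMAIL\}\}/g, user.email ?? '');
  }
  return resolved;
}
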
@@ -1,7 +1,6 @@
+const { getGoogleConfig, isEnabled } = require('@librechat/api');
 const { EModelEndpoint, AuthKeys } = require('librechat-data-provider');
 const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
-const { getLLMConfig } = require('~/server/services/Endpoints/google/llm');
-const { isEnabled } = require('~/server/utils');
 const { GoogleClient } = require('~/app');

 const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {

@@ -18,7 +17,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   let serviceKey = {};
   try {
     serviceKey = require('~/data/auth.json');
-  } catch (e) {
+  } catch (_e) {
     // Do nothing
   }

@@ -58,14 +57,14 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   if (optionsOnly) {
     clientOptions = Object.assign(
       {
-        modelOptions: endpointOption.model_parameters,
+        modelOptions: endpointOption?.model_parameters ?? {},
       },
       clientOptions,
     );
     if (overrideModel) {
       clientOptions.modelOptions.model = overrideModel;
     }
-    return getLLMConfig(credentials, clientOptions);
+    return getGoogleConfig(credentials, clientOptions);
   }

   const client = new GoogleClient(credentials, clientOptions);

@@ -1,41 +0,0 @@
const { removeNullishValues } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');

const buildOptions = (endpoint, parsedBody) => {
  const {
    modelLabel,
    chatGptLabel,
    promptPrefix,
    agentOptions,
    tools = [],
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    artifacts,
    ...modelOptions
  } = parsedBody;
  const endpointOption = removeNullishValues({
    endpoint,
    tools: tools
      .map((tool) => tool?.pluginKey ?? tool)
      .filter((toolName) => typeof toolName === 'string'),
    modelLabel,
    chatGptLabel,
    promptPrefix,
    agentOptions,
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    modelOptions,
  });

  if (typeof artifacts === 'string') {
    endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts });
  }

  return endpointOption;
};

module.exports = buildOptions;

@@ -1,7 +0,0 @@
const buildOptions = require('./build');
const initializeClient = require('./initialize');

module.exports = {
  buildOptions,
  initializeClient,
};

@@ -1,134 +0,0 @@
const {
  EModelEndpoint,
  resolveHeaders,
  mapModelToAzureConfig,
} = require('librechat-data-provider');
const { isEnabled, isUserProvided, getAzureCredentials } = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { PluginsClient } = require('~/app');

const initializeClient = async ({ req, res, endpointOption }) => {
  const {
    PROXY,
    OPENAI_API_KEY,
    AZURE_API_KEY,
    PLUGINS_USE_AZURE,
    OPENAI_REVERSE_PROXY,
    AZURE_OPENAI_BASEURL,
    OPENAI_SUMMARIZE,
    DEBUG_PLUGINS,
  } = process.env;

  const { key: expiresAt, model: modelName } = req.body;
  const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;

  let useAzure = isEnabled(PLUGINS_USE_AZURE);
  let endpoint = useAzure ? EModelEndpoint.azureOpenAI : EModelEndpoint.openAI;

  /** @type {false | TAzureConfig} */
  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
  useAzure = useAzure || azureConfig?.plugins;

  if (useAzure && endpoint !== EModelEndpoint.azureOpenAI) {
    endpoint = EModelEndpoint.azureOpenAI;
  }

  const credentials = {
    [EModelEndpoint.openAI]: OPENAI_API_KEY,
    [EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
  };

  const baseURLOptions = {
    [EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
    [EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
  };

  const userProvidesKey = isUserProvided(credentials[endpoint]);
  const userProvidesURL = isUserProvided(baseURLOptions[endpoint]);

  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  let apiKey = userProvidesKey ? userValues?.apiKey : credentials[endpoint];
  let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];

  const clientOptions = {
    contextStrategy,
    debug: isEnabled(DEBUG_PLUGINS),
    reverseProxyUrl: baseURL ? baseURL : null,
    proxy: PROXY ?? null,
    req,
    res,
    ...endpointOption,
  };

  if (useAzure && azureConfig) {
    const { modelGroupMap, groupMap } = azureConfig;
    const {
      azureOptions,
      baseURL,
      headers = {},
      serverless,
    } = mapModelToAzureConfig({
      modelName,
      modelGroupMap,
      groupMap,
    });

    clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
    clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });

    clientOptions.titleConvo = azureConfig.titleConvo;
    clientOptions.titleModel = azureConfig.titleModel;
    clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';

    const azureRate = modelName.includes('gpt-4') ? 30 : 17;
    clientOptions.streamRate = azureConfig.streamRate ?? azureRate;

    const groupName = modelGroupMap[modelName].group;
    clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
    clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
    clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;

    apiKey = azureOptions.azureOpenAIApiKey;
    clientOptions.azure = !serverless && azureOptions;
    if (serverless === true) {
      clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
        ? { 'api-version': azureOptions.azureOpenAIApiVersion }
        : undefined;
      clientOptions.headers['api-key'] = apiKey;
    }
  } else if (useAzure || (apiKey && apiKey.includes('{"azure') && !clientOptions.azure)) {
    clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
    apiKey = clientOptions.azure.azureOpenAIApiKey;
  }

  /** @type {undefined | TBaseEndpoint} */
  const pluginsConfig = req.app.locals[EModelEndpoint.gptPlugins];

  if (!useAzure && pluginsConfig) {
    clientOptions.streamRate = pluginsConfig.streamRate;
  }

  /** @type {undefined | TBaseEndpoint} */
  const allConfig = req.app.locals.all;
  if (allConfig) {
    clientOptions.streamRate = allConfig.streamRate;
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API key not provided. Please provide it again.`);
  }

  const client = new PluginsClient(apiKey, clientOptions);
  return {
    client,
    azure: clientOptions.azure,
    openAIApiKey: apiKey,
  };
};

module.exports = initializeClient;

@@ -1,410 +0,0 @@
// gptPlugins/initializeClient.spec.js
jest.mock('~/cache/getLogStores');
const { EModelEndpoint, ErrorTypes, validateAzureGroups } = require('librechat-data-provider');
const { getUserKey, getUserKeyValues } = require('~/server/services/UserService');
const initializeClient = require('./initialize');
const { PluginsClient } = require('~/app');

// Mock getUserKey since it's the only function we want to mock
jest.mock('~/server/services/UserService', () => ({
  getUserKey: jest.fn(),
  getUserKeyValues: jest.fn(),
  checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
}));

describe('gptPlugins/initializeClient', () => {
  // Set up environment variables
  const originalEnvironment = process.env;
  const app = {
    locals: {},
  };

  const validAzureConfigs = [
    {
      group: 'librechat-westus',
      apiKey: 'WESTUS_API_KEY',
      instanceName: 'librechat-westus',
      version: '2023-12-01-preview',
      models: {
        'gpt-4-vision-preview': {
          deploymentName: 'gpt-4-vision-preview',
          version: '2024-02-15-preview',
        },
        'gpt-3.5-turbo': {
          deploymentName: 'gpt-35-turbo',
        },
        'gpt-3.5-turbo-1106': {
          deploymentName: 'gpt-35-turbo-1106',
        },
        'gpt-4': {
          deploymentName: 'gpt-4',
        },
        'gpt-4-1106-preview': {
          deploymentName: 'gpt-4-1106-preview',
        },
      },
    },
    {
      group: 'librechat-eastus',
      apiKey: 'EASTUS_API_KEY',
      instanceName: 'librechat-eastus',
      deploymentName: 'gpt-4-turbo',
      version: '2024-02-15-preview',
      models: {
        'gpt-4-turbo': true,
      },
      baseURL: 'https://eastus.example.com',
      additionalHeaders: {
        'x-api-key': 'x-api-key-value',
      },
    },
    {
      group: 'mistral-inference',
      apiKey: 'AZURE_MISTRAL_API_KEY',
      baseURL:
        'https://Mistral-large-vnpet-serverless.region.inference.ai.azure.com/v1/chat/completions',
      serverless: true,
      models: {
        'mistral-large': true,
      },
    },
    {
      group: 'llama-70b-chat',
      apiKey: 'AZURE_LLAMA2_70B_API_KEY',
      baseURL:
        'https://Llama-2-70b-chat-qmvyb-serverless.region.inference.ai.azure.com/v1/chat/completions',
      serverless: true,
      models: {
        'llama-70b-chat': true,
      },
    },
  ];

  const { modelNames, modelGroupMap, groupMap } = validateAzureGroups(validAzureConfigs);

  beforeEach(() => {
    jest.resetModules(); // Clears the cache
    process.env = { ...originalEnvironment }; // Make a copy
  });

  afterAll(() => {
    process.env = originalEnvironment; // Restore original env vars
  });

  test('should initialize PluginsClient with OpenAI API key and default options', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.PLUGINS_USE_AZURE = 'false';
    process.env.DEBUG_PLUGINS = 'false';
    process.env.OPENAI_SUMMARIZE = 'false';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client, openAIApiKey } = await initializeClient({ req, res, endpointOption });

    expect(openAIApiKey).toBe('test-openai-api-key');
    expect(client).toBeInstanceOf(PluginsClient);
  });

  test('should initialize PluginsClient with Azure credentials when PLUGINS_USE_AZURE is true', async () => {
    process.env.AZURE_API_KEY = 'test-azure-api-key';
    (process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'),
      (process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
      (process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
      (process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
      (process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
      (process.env.PLUGINS_USE_AZURE = 'true');
    process.env.DEBUG_PLUGINS = 'false';
    process.env.OPENAI_SUMMARIZE = 'false';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'test-model' } };

    const { client, azure } = await initializeClient({ req, res, endpointOption });

    expect(azure.azureOpenAIApiKey).toBe('test-azure-api-key');
    expect(client).toBeInstanceOf(PluginsClient);
  });

  test('should use the debug option when DEBUG_PLUGINS is enabled', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.DEBUG_PLUGINS = 'true';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client } = await initializeClient({ req, res, endpointOption });

    expect(client.options.debug).toBe(true);
  });

  test('should set contextStrategy to summarize when OPENAI_SUMMARIZE is enabled', async () => {
    process.env.OPENAI_API_KEY = 'test-openai-api-key';
    process.env.OPENAI_SUMMARIZE = 'true';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client } = await initializeClient({ req, res, endpointOption });

    expect(client.options.contextStrategy).toBe('summarize');
  });

  // ... additional tests for reverseProxyUrl, proxy, user-provided keys, etc.

  test('should throw an error if no API keys are provided in the environment', async () => {
    // Clear the environment variables for API keys
    delete process.env.OPENAI_API_KEY;
    delete process.env.AZURE_API_KEY;

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      `${EModelEndpoint.openAI} API key not provided.`,
    );
  });

  // Additional tests for gptPlugins/initializeClient.spec.js

  // ... (previous test setup code)

  test('should handle user-provided OpenAI keys and check expiry', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'false';

    const futureDate = new Date(Date.now() + 10000).toISOString();
    const req = {
      body: { key: futureDate },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    getUserKeyValues.mockResolvedValue({ apiKey: 'test-user-provided-openai-api-key' });

    const { openAIApiKey } = await initializeClient({ req, res, endpointOption });

    expect(openAIApiKey).toBe('test-user-provided-openai-api-key');
  });

  test('should handle user-provided Azure keys and check expiry', async () => {
    process.env.AZURE_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'true';

    const futureDate = new Date(Date.now() + 10000).toISOString();
    const req = {
      body: { key: futureDate },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'test-model' } };

    getUserKeyValues.mockResolvedValue({
      apiKey: JSON.stringify({
        azureOpenAIApiKey: 'test-user-provided-azure-api-key',
        azureOpenAIApiDeploymentName: 'test-deployment',
      }),
    });

    const { azure } = await initializeClient({ req, res, endpointOption });

    expect(azure.azureOpenAIApiKey).toBe('test-user-provided-azure-api-key');
  });

  test('should throw an error if the user-provided key has expired', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'FALSE';
    const expiresAt = new Date(Date.now() - 10000).toISOString(); // Expired
    const req = {
      body: { key: expiresAt },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /expired_user_key/,
    );
  });

  test('should throw an error if the user-provided Azure key is invalid JSON', async () => {
    process.env.AZURE_API_KEY = 'user_provided';
    process.env.PLUGINS_USE_AZURE = 'true';

    const req = {
      body: { key: new Date(Date.now() + 10000).toISOString() },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    // Simulate an invalid JSON string returned from getUserKey
    getUserKey.mockResolvedValue('invalid-json');
    getUserKeyValues.mockImplementation(() => {
      let userValues = getUserKey();
      try {
        userValues = JSON.parse(userValues);
      } catch (e) {
        throw new Error(
          JSON.stringify({
            type: ErrorTypes.INVALID_USER_KEY,
          }),
        );
      }
      return userValues;
    });

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /invalid_user_key/,
    );
  });

  test('should correctly handle the presence of a reverse proxy', async () => {
    process.env.OPENAI_REVERSE_PROXY = 'http://reverse.proxy';
    process.env.PROXY = 'http://proxy';
    process.env.OPENAI_API_KEY = 'test-openai-api-key';

    const req = {
      body: { key: null },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = { modelOptions: { model: 'default-model' } };

    const { client } = await initializeClient({ req, res, endpointOption });

    expect(client.options.reverseProxyUrl).toBe('http://reverse.proxy');
    expect(client.options.proxy).toBe('http://proxy');
  });

  test('should throw an error when user-provided values are not valid JSON', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    const req = {
      body: { key: new Date(Date.now() + 10000).toISOString(), endpoint: 'openAI' },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = {};

    // Mock getUserKey to return a non-JSON string
    getUserKey.mockResolvedValue('not-a-json');
    getUserKeyValues.mockImplementation(() => {
      let userValues = getUserKey();
      try {
        userValues = JSON.parse(userValues);
      } catch (e) {
        throw new Error(
          JSON.stringify({
            type: ErrorTypes.INVALID_USER_KEY,
          }),
        );
      }
      return userValues;
    });

    await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow(
      /invalid_user_key/,
    );
  });

  test('should initialize client correctly for Azure OpenAI with valid configuration', async () => {
    const req = {
      body: {
        key: null,
        endpoint: EModelEndpoint.gptPlugins,
        model: modelNames[0],
      },
      user: { id: '123' },
      app: {
        locals: {
          [EModelEndpoint.azureOpenAI]: {
            plugins: true,
            modelNames,
            modelGroupMap,
            groupMap,
          },
        },
      },
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });
    expect(client.client.options.azure).toBeDefined();
  });

  test('should initialize client with default options when certain env vars are not set', async () => {
    delete process.env.OPENAI_SUMMARIZE;
    process.env.OPENAI_API_KEY = 'some-api-key';

    const req = {
      body: { key: null, endpoint: EModelEndpoint.gptPlugins },
      user: { id: '123' },
      app,
    };
    const res = {};
    const endpointOption = {};

    const client = await initializeClient({ req, res, endpointOption });
    expect(client.client.options.contextStrategy).toBe(null);
  });

  test('should correctly use user-provided apiKey and baseURL when provided', async () => {
    process.env.OPENAI_API_KEY = 'user_provided';
    process.env.OPENAI_REVERSE_PROXY = 'user_provided';
    const req = {
      body: {
        key: new Date(Date.now() + 10000).toISOString(),
        endpoint: 'openAI',
      },
      user: {
        id: '123',
      },
      app,
    };
    const res = {};
    const endpointOption = {};

    getUserKeyValues.mockResolvedValue({
      apiKey: 'test',
      baseURL: 'https://user-provided-url.com',
    });

    const result = await initializeClient({ req, res, endpointOption });

    expect(result.openAIApiKey).toBe('test');
    expect(result.client.options.reverseProxyUrl).toBe('https://user-provided-url.com');
  });
});

api/server/services/Endpoints/index.js (new file, 58 lines)
@@ -0,0 +1,58 @@
const { Providers } = require('@librechat/agents');
const { EModelEndpoint } = require('librechat-data-provider');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const { getCustomEndpointConfig } = require('~/server/services/Config');

const providerConfigMap = {
  [Providers.XAI]: initCustom,
  [Providers.OLLAMA]: initCustom,
  [Providers.DEEPSEEK]: initCustom,
  [Providers.OPENROUTER]: initCustom,
  [EModelEndpoint.openAI]: initOpenAI,
  [EModelEndpoint.google]: initGoogle,
  [EModelEndpoint.azureOpenAI]: initOpenAI,
  [EModelEndpoint.anthropic]: initAnthropic,
  [EModelEndpoint.bedrock]: getBedrockOptions,
};

/**
 * Get the provider configuration and override endpoint based on the provider string
 * @param {string} provider - The provider string
 * @returns {Promise<{
 *   getOptions: Function,
 *   overrideProvider?: string,
 *   customEndpointConfig?: TEndpoint
 * }>}
 */
async function getProviderConfig(provider) {
  let getOptions = providerConfigMap[provider];
  let overrideProvider;
  /** @type {TEndpoint | undefined} */
  let customEndpointConfig;

  if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
    overrideProvider = provider.toLowerCase();
    getOptions = providerConfigMap[overrideProvider];
  } else if (!getOptions) {
    customEndpointConfig = await getCustomEndpointConfig(provider);
    if (!customEndpointConfig) {
      throw new Error(`Provider ${provider} not supported`);
    }
    getOptions = initCustom;
    overrideProvider = Providers.OPENAI;
  }

  return {
    getOptions,
    overrideProvider,
    customEndpointConfig,
  };
}

module.exports = {
  getProviderConfig,
};

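With this module in place, callers reduce to a single lookup plus an optional provider override, as in the agents initializer hunk earlier. A usage sketch (the wrapper function is hypothetical; `getProviderConfig` and the require path are as in the diff):

const { getProviderConfig } = require('~/server/services/Endpoints');

// Hypothetical wrapper showing the intended call pattern.
async function initProvider(provider, params) {
  const { getOptions, overrideProvider } = await getProviderConfig(provider);
  // getOptions is one of the initializers mapped above (openAI, google, custom, ...).
  return {
    provider: overrideProvider ?? provider,
    options: await getOptions(params),
  };
}
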
@@ -1,11 +1,7 @@
-const {
-  ErrorTypes,
-  EModelEndpoint,
-  resolveHeaders,
-  mapModelToAzureConfig,
-} = require('librechat-data-provider');
+const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
 const {
   isEnabled,
+  resolveHeaders,
   isUserProvided,
   getOpenAIConfig,
   getAzureCredentials,

@@ -84,7 +80,10 @@ const initializeClient = async ({
   });

   clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
-  clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
+  clientOptions.headers = resolveHeaders(
+    { ...headers, ...(clientOptions.headers ?? {}) },
+    req.user,
+  );

   clientOptions.titleConvo = azureConfig.titleConvo;
   clientOptions.titleModel = azureConfig.titleModel;

@@ -139,7 +138,7 @@ const initializeClient = async ({
   }

   if (optionsOnly) {
-    const modelOptions = endpointOption.model_parameters;
+    const modelOptions = endpointOption?.model_parameters ?? {};
     modelOptions.model = modelName;
     clientOptions = Object.assign({ modelOptions }, clientOptions);
     clientOptions.modelOptions.user = req.user.id;

@@ -1,3 +1,6 @@
+const { sleep } = require('@librechat/agents');
+const { sendEvent } = require('@librechat/api');
+const { logger } = require('@librechat/data-schemas');
 const {
   Constants,
   StepTypes,

@@ -8,9 +11,8 @@ const {
 } = require('librechat-data-provider');
 const { retrieveAndProcessFile } = require('~/server/services/Files/process');
 const { processRequiredActions } = require('~/server/services/ToolService');
-const { createOnProgress, sendMessage, sleep } = require('~/server/utils');
 const { processMessages } = require('~/server/services/Threads');
-const { logger } = require('~/config');
+const { createOnProgress } = require('~/server/utils');

 /**
  * Implements the StreamRunManager functionality for managing the streaming

@@ -126,7 +128,7 @@ class StreamRunManager {
       conversationId: this.finalMessage.conversationId,
     };

-    sendMessage(this.res, contentData);
+    sendEvent(this.res, contentData);
   }

   /* <------------------ Misc. Helpers ------------------> */

@@ -302,7 +304,7 @@ class StreamRunManager {

       for (const d of delta[key]) {
         if (typeof d === 'object' && !Object.prototype.hasOwnProperty.call(d, 'index')) {
-          logger.warn('Expected an object with an \'index\' for array updates but got:', d);
+          logger.warn("Expected an object with an 'index' for array updates but got:", d);
           continue;
         }

@@ -1,9 +1,9 @@
 const { logger } = require('@librechat/data-schemas');
-const { CacheKeys, processMCPEnv } = require('librechat-data-provider');
+const { CacheKeys } = require('librechat-data-provider');
+const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
 const { getMCPManager, getFlowStateManager } = require('~/config');
 const { getCachedTools, setCachedTools } = require('./Config');
 const { getLogStores } = require('~/cache');
-const { findToken, updateToken, createToken, deleteTokens } = require('~/models');

 /**
  * Initialize MCP servers

@@ -30,7 +30,6 @@ async function initializeMCP(app) {
       createToken,
       deleteTokens,
     },
-    processMCPEnv,
   });

   delete app.locals.mcpConfig;

@@ -41,6 +41,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
     sidePanel: interfaceConfig?.sidePanel ?? defaults.sidePanel,
     privacyPolicy: interfaceConfig?.privacyPolicy ?? defaults.privacyPolicy,
     termsOfService: interfaceConfig?.termsOfService ?? defaults.termsOfService,
+    mcpServers: interfaceConfig?.mcpServers ?? defaults.mcpServers,
     bookmarks: interfaceConfig?.bookmarks ?? defaults.bookmarks,
     memories: shouldDisableMemories ? false : (interfaceConfig?.memories ?? defaults.memories),
     prompts: interfaceConfig?.prompts ?? defaults.prompts,

@@ -7,9 +7,9 @@ const {
   defaultAssistantsVersion,
   defaultAgentCapabilities,
 } = require('librechat-data-provider');
+const { sendEvent } = require('@librechat/api');
 const { Providers } = require('@librechat/agents');
 const partialRight = require('lodash/partialRight');
-const { sendMessage } = require('./streamResponse');

 /** Helper function to escape special characters in regex
  * @param {string} string - The string to escape.

@@ -37,7 +37,7 @@ const createOnProgress = (
       basePayload.text = basePayload.text + chunk;

       const payload = Object.assign({}, basePayload, rest);
-      sendMessage(res, payload);
+      sendEvent(res, payload);
       if (_onProgress) {
         _onProgress(payload);
       }

@@ -50,7 +50,7 @@ const createOnProgress = (
   const sendIntermediateMessage = (res, payload, extraTokens = '') => {
     basePayload.text = basePayload.text + extraTokens;
     const message = Object.assign({}, basePayload, payload);
-    sendMessage(res, message);
+    sendEvent(res, message);
     if (i === 0) {
       basePayload.initial = false;
     }

@@ -1,11 +1,9 @@
-const streamResponse = require('./streamResponse');
 const removePorts = require('./removePorts');
 const countTokens = require('./countTokens');
 const handleText = require('./handleText');
 const sendEmail = require('./sendEmail');
 const queue = require('./queue');
 const files = require('./files');
 const math = require('./math');

 /**
  * Check if email configuration is set

@@ -28,7 +26,6 @@ function checkEmailConfig() {
 }

 module.exports = {
-  ...streamResponse,
   checkEmailConfig,
   ...handleText,
   countTokens,

@@ -36,5 +33,4 @@ module.exports = {
   sendEmail,
   ...files,
   ...queue,
   math,
 };

@@ -1503,7 +1503,6 @@
  * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
  * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
  * @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration.
- * @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins.
  * @memberof typedefs
  */

@@ -1,11 +1,9 @@
-const loadYaml = require('./loadYaml');
 const tokenHelpers = require('./tokens');
 const deriveBaseURL = require('./deriveBaseURL');
 const extractBaseURL = require('./extractBaseURL');
 const findMessageContent = require('./findMessageContent');

 module.exports = {
-  loadYaml,
   deriveBaseURL,
   extractBaseURL,
   ...tokenHelpers,

@@ -1,13 +0,0 @@
const fs = require('fs');
const yaml = require('js-yaml');

function loadYaml(filepath) {
  try {
    let fileContents = fs.readFileSync(filepath, 'utf8');
    return yaml.load(fileContents);
  } catch (e) {
    return e;
  }
}

module.exports = loadYaml;

@@ -1,6 +1,7 @@
 import React, { createContext, useContext } from 'react';
 import { Tools, LocalStorageKeys } from 'librechat-data-provider';
 import { useMCPSelect, useToolToggle, useCodeApiKeyForm, useSearchApiKeyForm } from '~/hooks';
+import { useGetStartupConfig } from '~/data-provider';

 interface BadgeRowContextType {
   conversationId?: string | null;

@@ -10,6 +11,7 @@ interface BadgeRowContextType {
   fileSearch: ReturnType<typeof useToolToggle>;
   codeApiKeyForm: ReturnType<typeof useCodeApiKeyForm>;
   searchApiKeyForm: ReturnType<typeof useSearchApiKeyForm>;
+  startupConfig: ReturnType<typeof useGetStartupConfig>['data'];
 }

 const BadgeRowContext = createContext<BadgeRowContextType | undefined>(undefined);

@@ -28,6 +30,9 @@ interface BadgeRowProviderProps {
 }

 export default function BadgeRowProvider({ children, conversationId }: BadgeRowProviderProps) {
+  /** Startup config */
+  const { data: startupConfig } = useGetStartupConfig();
+
   /** MCPSelect hook */
   const mcpSelect = useMCPSelect({ conversationId });

@@ -73,6 +78,7 @@ export default function BadgeRowProvider({ children, conversationId }: BadgeRowP
     mcpSelect,
     webSearch,
     fileSearch,
+    startupConfig,
     conversationId,
     codeApiKeyForm,
     codeInterpreter,

@@ -68,6 +68,7 @@ export default function ExportAndShareMenu({
   return (
     <>
       <DropdownPopup
+        portal={true}
         menuId={menuId}
         focusLoop={true}
         unmountOnHide={true}

@@ -2,8 +2,7 @@ import React, { memo, useCallback, useState } from 'react';
 import { SettingsIcon } from 'lucide-react';
 import { Constants } from 'librechat-data-provider';
 import { useUpdateUserPluginsMutation } from 'librechat-data-provider/react-query';
-import type { TUpdateUserPlugins } from 'librechat-data-provider';
-import type { McpServerInfo } from '~/hooks/Plugins/useMCPSelect';
+import type { TUpdateUserPlugins, TPlugin } from 'librechat-data-provider';
 import MCPConfigDialog, { type ConfigFieldDetail } from '~/components/ui/MCPConfigDialog';
 import { useToastContext, useBadgeRowContext } from '~/Providers';
 import MultiSelect from '~/components/ui/MultiSelect';

@@ -18,11 +17,11 @@ const getBaseMCPPluginKey = (fullPluginKey: string): string => {
 function MCPSelect() {
   const localize = useLocalize();
   const { showToast } = useToastContext();
-  const { mcpSelect } = useBadgeRowContext();
+  const { mcpSelect, startupConfig } = useBadgeRowContext();
   const { mcpValues, setMCPValues, mcpServerNames, mcpToolDetails, isPinned } = mcpSelect;

   const [isConfigModalOpen, setIsConfigModalOpen] = useState(false);
-  const [selectedToolForConfig, setSelectedToolForConfig] = useState<McpServerInfo | null>(null);
+  const [selectedToolForConfig, setSelectedToolForConfig] = useState<TPlugin | null>(null);

   const updateUserPluginsMutation = useUpdateUserPluginsMutation({
     onSuccess: () => {

@@ -129,6 +128,8 @@ function MCPSelect() {
     return null;
   }

+  const placeholderText =
+    startupConfig?.interface?.mcpServers?.placeholder || localize('com_ui_mcp_servers');
   return (
     <>
       <MultiSelect

@@ -138,7 +139,7 @@ function MCPSelect() {
         defaultSelectedValues={mcpValues ?? []}
         renderSelectedValues={renderSelectedValues}
         renderItemContent={renderItemContent}
-        placeholder={localize('com_ui_mcp_servers')}
+        placeholder={placeholderText}
         popoverClassName="min-w-fit"
         className="badge-icon min-w-fit"
         selectIcon={<MCPIcon className="icon-md text-text-primary" />}

@@ -11,6 +11,7 @@ interface MCPSubMenuProps {
   mcpValues?: string[];
   mcpServerNames: string[];
   handleMCPToggle: (serverName: string) => void;
+  placeholder?: string;
 }

 const MCPSubMenu = ({

@@ -19,11 +20,13 @@ const MCPSubMenu = ({
   mcpServerNames,
   setIsMCPPinned,
   handleMCPToggle,
+  placeholder,
   ...props
 }: MCPSubMenuProps) => {
   const localize = useLocalize();

   const menuStore = Ariakit.useMenuStore({
     focusLoop: true,
     showTimeout: 100,
     placement: 'right',
   });

@@ -33,12 +36,18 @@ const MCPSubMenu = ({
     <Ariakit.MenuItem
       {...props}
       render={
-        <Ariakit.MenuButton className="flex w-full cursor-pointer items-center justify-between rounded-lg p-2 hover:bg-surface-hover" />
+        <Ariakit.MenuButton
+          onClick={(e: React.MouseEvent<HTMLButtonElement>) => {
+            e.stopPropagation();
+            menuStore.toggle();
+          }}
+          className="flex w-full cursor-pointer items-center justify-between rounded-lg p-2 hover:bg-surface-hover"
+        />
       }
     >
       <div className="flex items-center gap-2">
         <MCPIcon className="icon-md" />
-        <span>{localize('com_ui_mcp_servers')}</span>
+        <span>{placeholder || localize('com_ui_mcp_servers')}</span>
         <ChevronRight className="ml-auto h-3 w-3" />
       </div>
       <button

@@ -60,10 +69,8 @@ const MCPSubMenu = ({
       </button>
     </Ariakit.MenuItem>
     <Ariakit.Menu
-      gutter={-4}
-      shift={-8}
-      unmountOnHide
       portal={true}
+      unmountOnHide={true}
       className={cn(
         'animate-popover-left z-50 ml-3 flex min-w-[200px] flex-col rounded-xl',
         'border border-border-light bg-surface-secondary p-1 shadow-lg',

@@ -18,8 +18,15 @@ const ToolsDropdown = ({ disabled }: ToolsDropdownProps) => {
  const localize = useLocalize();
  const isDisabled = disabled ?? false;
  const [isPopoverActive, setIsPopoverActive] = useState(false);
  const { webSearch, codeInterpreter, fileSearch, mcpSelect, searchApiKeyForm, codeApiKeyForm } =
    useBadgeRowContext();
  const {
    webSearch,
    mcpSelect,
    fileSearch,
    startupConfig,
    codeApiKeyForm,
    codeInterpreter,
    searchApiKeyForm,
  } = useBadgeRowContext();
  const { setIsDialogOpen: setIsCodeDialogOpen, menuTriggerRef: codeMenuTriggerRef } =
    codeApiKeyForm;
  const { setIsDialogOpen: setIsSearchDialogOpen, menuTriggerRef: searchMenuTriggerRef } =

@@ -89,18 +96,10 @@ const ToolsDropdown = ({ disabled }: ToolsDropdownProps) => {
    [mcpSelect],
  );

  const dropdownItems = useMemo(() => {
    const items: MenuItemProps[] = [
      {
        render: () => (
          <div className="px-3 py-2 text-xs font-semibold text-text-secondary">
            {localize('com_ui_tools')}
          </div>
        ),
        hideOnClick: false,
      },
    ];
  const mcpPlaceholder = startupConfig?.interface?.mcpServers?.placeholder;

  const dropdownItems = useMemo(() => {
    const items: MenuItemProps[] = [];
    items.push({
      onClick: handleFileSearchToggle,
      hideOnClick: false,

@@ -246,8 +245,9 @@ const ToolsDropdown = ({ disabled }: ToolsDropdownProps) => {
        <MCPSubMenu
          {...props}
          mcpValues={mcpValues}
          mcpServerNames={mcpServerNames}
          isMCPPinned={isMCPPinned}
          placeholder={mcpPlaceholder}
          mcpServerNames={mcpServerNames}
          setIsMCPPinned={setIsMCPPinned}
          handleMCPToggle={handleMCPToggle}
        />

@@ -262,6 +262,7 @@ const ToolsDropdown = ({ disabled }: ToolsDropdownProps) => {
    canRunCode,
    isMCPPinned,
    isCodePinned,
    mcpPlaceholder,
    mcpServerNames,
    isSearchPinned,
    setIsMCPPinned,

@@ -1,14 +1,33 @@
import { useState, useEffect } from 'react';
import { X, ArrowDownToLine, PanelLeftOpen, PanelLeftClose } from 'lucide-react';
import { useState, useEffect, useCallback, useRef } from 'react';
import { X, ArrowDownToLine, PanelLeftOpen, PanelLeftClose, RotateCcw } from 'lucide-react';
import { Button, OGDialog, OGDialogContent, TooltipAnchor } from '~/components';
import { useLocalize } from '~/hooks';

const getQualityStyles = (quality: string): string => {
  if (quality === 'high') {
    return 'bg-green-100 text-green-800';
  }
  if (quality === 'low') {
    return 'bg-orange-100 text-orange-800';
  }
  return 'bg-gray-100 text-gray-800';
};

export default function DialogImage({ isOpen, onOpenChange, src = '', downloadImage, args }) {
  const localize = useLocalize();
  const [isPromptOpen, setIsPromptOpen] = useState(false);
  const [imageSize, setImageSize] = useState<string | null>(null);

  const getImageSize = async (url: string) => {
  // Zoom and pan state
  const [zoom, setZoom] = useState(1);
  const [panX, setPanX] = useState(0);
  const [panY, setPanY] = useState(0);
  const [isDragging, setIsDragging] = useState(false);
  const [dragStart, setDragStart] = useState({ x: 0, y: 0 });

  const containerRef = useRef<HTMLDivElement>(null);

  const getImageSize = useCallback(async (url: string) => {
    try {
      const response = await fetch(url, { method: 'HEAD' });
      const contentLength = response.headers.get('Content-Length');

@@ -25,7 +44,7 @@ export default function DialogImage({ isOpen, onOpenChange, src = '', downloadIm
      console.error('Error getting image size:', error);
      return null;
    }
  };
  }, []);

  const formatFileSize = (bytes: number): string => {
    if (bytes === 0) return '0 Bytes';

@@ -37,11 +56,129 @@ export default function DialogImage({ isOpen, onOpenChange, src = '', downloadIm
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
  };

  const getImageMaxWidth = () => {
    // On mobile (when panel overlays), use full width minus padding
    // On desktop, account for the side panel width
    if (isPromptOpen) {
      return window.innerWidth >= 640 ? 'calc(100vw - 22rem)' : 'calc(100vw - 2rem)';
    }
    return 'calc(100vw - 2rem)';
  };

  const resetZoom = useCallback(() => {
    setZoom(1);
    setPanX(0);
    setPanY(0);
  }, []);

  const getCursor = () => {
    if (zoom <= 1) return 'default';
    return isDragging ? 'grabbing' : 'grab';
  };

  const handleDoubleClick = useCallback(() => {
    if (zoom > 1) {
      resetZoom();
    } else {
      // Zoom in to 2x on double click when at normal zoom
      setZoom(2);
    }
  }, [zoom, resetZoom]);

  const handleWheel = useCallback(
    (e: React.WheelEvent<HTMLDivElement>) => {
      e.preventDefault();
      if (!containerRef.current) return;

      const rect = containerRef.current.getBoundingClientRect();
      const mouseX = e.clientX - rect.left;
      const mouseY = e.clientY - rect.top;

      // Calculate zoom factor
      const zoomFactor = e.deltaY > 0 ? 0.9 : 1.1;
      const newZoom = Math.min(Math.max(zoom * zoomFactor, 1), 5);

      if (newZoom === zoom) return;

      // If zooming back to 1, reset pan to center the image
      if (newZoom === 1) {
        setZoom(1);
        setPanX(0);
        setPanY(0);
        return;
      }

      // Calculate the zoom center relative to the current viewport
      const containerCenterX = rect.width / 2;
      const containerCenterY = rect.height / 2;

      // Calculate new pan position to zoom towards mouse cursor
      const zoomRatio = newZoom / zoom;
      const deltaX = (mouseX - containerCenterX - panX) * (zoomRatio - 1);
      const deltaY = (mouseY - containerCenterY - panY) * (zoomRatio - 1);

      setZoom(newZoom);
      setPanX(panX - deltaX);
      setPanY(panY - deltaY);
    },
    [zoom, panX, panY],
  );
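The pan correction in `handleWheel` keeps the point under the cursor fixed while scaling relative to the container center. A standalone check of that math (pure function, not part of the diff; one axis shown):

```ts
// One axis of the wheel-zoom math above, extracted for verification.
function zoomTowardsCursor(zoom: number, pan: number, mouseFromCenter: number, factor: number) {
  const newZoom = Math.min(Math.max(zoom * factor, 1), 5);
  const delta = (mouseFromCenter - pan) * (newZoom / zoom - 1);
  return { newZoom, newPan: pan - delta };
}

const { newZoom, newPan } = zoomTowardsCursor(1, 0, 200, 1.1);
// newZoom = 1.1, newPan = -20: a content point 200px right of center
// lands at 200 * 1.1 - 20 = 200px again, i.e. it stays under the cursor.
```
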

  const handleMouseDown = useCallback(
    (e: React.MouseEvent<HTMLDivElement>) => {
      e.preventDefault();
      if (zoom <= 1) return;
      setIsDragging(true);
      setDragStart({
        x: e.clientX - panX,
        y: e.clientY - panY,
      });
    },
    [zoom, panX, panY],
  );

  const handleMouseMove = useCallback(
    (e: React.MouseEvent<HTMLDivElement>) => {
      if (!isDragging || zoom <= 1) return;
      const newPanX = e.clientX - dragStart.x;
      const newPanY = e.clientY - dragStart.y;
      setPanX(newPanX);
      setPanY(newPanY);
    },
    [isDragging, dragStart, zoom],
  );
  const handleMouseUp = useCallback(() => {
    setIsDragging(false);
  }, []);

  useEffect(() => {
    const onKey = (e: KeyboardEvent) => e.key === 'Escape' && resetZoom();
    document.addEventListener('keydown', onKey);
    return () => document.removeEventListener('keydown', onKey);
  }, [resetZoom]);

  useEffect(() => {
    if (isOpen && src) {
      getImageSize(src).then(setImageSize);
      resetZoom();
    }
  }, [isOpen, src]);
  }, [isOpen, src, getImageSize, resetZoom]);

  // Ensure image is centered when zoom changes to 1
  useEffect(() => {
    if (zoom === 1) {
      setPanX(0);
      setPanY(0);
    }
  }, [zoom]);

  // Reset pan when panel opens/closes to maintain centering
  useEffect(() => {
    if (zoom === 1) {
      setPanX(0);
      setPanY(0);
    }
  }, [isPromptOpen, zoom]);

  return (
    <OGDialog open={isOpen} onOpenChange={onOpenChange}>

@@ -52,7 +189,7 @@ export default function DialogImage({ isOpen, onOpenChange, src = '', downloadIm
        overlayClassName="bg-surface-primary opacity-95 z-50"
      >
        <div
          className={`absolute left-0 top-0 z-10 flex items-center justify-between p-4 transition-all duration-500 ease-in-out ${isPromptOpen ? 'right-80' : 'right-0'}`}
          className={`ease-[cubic-bezier(0.175,0.885,0.32,1.275)] absolute left-0 top-0 z-10 flex items-center justify-between p-3 transition-all duration-500 sm:p-4 ${isPromptOpen ? 'right-0 sm:right-80' : 'right-0'}`}
        >
          <TooltipAnchor
            description={localize('com_ui_close')}

@@ -62,11 +199,21 @@ export default function DialogImage({ isOpen, onOpenChange, src = '', downloadIm
                variant="ghost"
                className="h-10 w-10 p-0 hover:bg-surface-hover"
              >
                <X className="size-6" />
                <X className="size-7 sm:size-6" />
              </Button>
            }
          />
          <div className="flex items-center gap-2">
          <div className="flex items-center gap-1 sm:gap-2">
            {zoom > 1 && (
              <TooltipAnchor
                description={localize('com_ui_reset_zoom')}
                render={
                  <Button onClick={resetZoom} variant="ghost" className="h-10 w-10 p-0">
                    <RotateCcw className="size-6" />
                  </Button>
                }
              />
            )}
            <TooltipAnchor
              description={localize('com_ui_download')}
              render={

@@ -88,9 +235,9 @@ export default function DialogImage({ isOpen, onOpenChange, src = '', downloadIm
                  className="h-10 w-10 p-0"
                >
                  {isPromptOpen ? (
                    <PanelLeftOpen className="size-6" />
                    <PanelLeftOpen className="size-7 sm:size-6" />
                  ) : (
                    <PanelLeftClose className="size-6" />
                    <PanelLeftClose className="size-7 sm:size-6" />
                  )}
                </Button>
              }

@@ -100,36 +247,81 @@ export default function DialogImage({ isOpen, onOpenChange, src = '', downloadIm

        {/* Main content area with image */}
        <div
          className={`flex h-full transition-all duration-500 ease-in-out ${isPromptOpen ? 'mr-80' : 'mr-0'}`}
          className={`ease-[cubic-bezier(0.175,0.885,0.32,1.275)] flex h-full transition-all duration-500 ${isPromptOpen ? 'mr-0 sm:mr-80' : 'mr-0'}`}
        >
          <div className="flex flex-1 items-center justify-center px-4 pb-4 pt-20">
            <img
              src={src}
              alt="Image"
              className="max-h-full max-w-full object-contain"
          <div
            ref={containerRef}
            className="flex flex-1 items-center justify-center px-2 pb-4 pt-16 sm:px-4 sm:pt-20"
            onWheel={handleWheel}
            onMouseDown={handleMouseDown}
            onMouseMove={handleMouseMove}
            onMouseUp={handleMouseUp}
            onMouseLeave={handleMouseUp}
            onDoubleClick={handleDoubleClick}
            style={{
              cursor: getCursor(),
              overflow: zoom > 1 ? 'hidden' : 'visible',
              minHeight: 0, // Allow flexbox to shrink
            }}
          >
            <div
              className="flex items-center justify-center transition-transform duration-100 ease-out"
              style={{
                maxHeight: 'calc(100vh - 6rem)',
                maxWidth: '100%',
                transform: `translate(${panX}px, ${panY}px) scale(${zoom})`,
                transformOrigin: 'center center',
                width: '100%',
                height: '100%',
                display: 'flex',
                alignItems: 'center',
                justifyContent: 'center',
              }}
            />
            >
              <img
                src={src}
                alt="Image"
                className="block object-contain"
                style={{
                  maxHeight: 'calc(100vh - 8rem)',
                  maxWidth: getImageMaxWidth(),
                  width: 'auto',
                  height: 'auto',
                }}
              />
            </div>
          </div>
        </div>

        {/* Side Panel */}
        <div
          className={`shadow-l-lg fixed right-0 top-0 z-20 h-full w-80 transform rounded-l-2xl border-l border-border-light bg-surface-primary transition-transform duration-500 ease-in-out ${
          className={`sm:shadow-l-lg ease-[cubic-bezier(0.175,0.885,0.32,1.275)] fixed right-0 top-0 z-20 h-full w-full transform border-l border-border-light bg-surface-primary shadow-2xl backdrop-blur-sm transition-transform duration-500 sm:w-80 sm:rounded-l-2xl ${
            isPromptOpen ? 'translate-x-0' : 'translate-x-full'
          }`}
        >
          <div className="h-full overflow-y-auto p-6">
            <div className="mb-4">
          {/* Mobile pull handle - removed for cleaner look */}

          <div className="h-full overflow-y-auto p-4 sm:p-6">
            {/* Mobile close button */}
            <div className="mb-4 flex items-center justify-between sm:hidden">
              <h3 className="text-lg font-semibold text-text-primary">
                {localize('com_ui_image_details')}
              </h3>
              <Button
                onClick={() => setIsPromptOpen(false)}
                variant="ghost"
                className="h-12 w-12 p-0"
              >
                <X className="size-6" />
              </Button>
            </div>

            <div className="mb-4 hidden sm:block">
              <h3 className="mb-2 text-lg font-semibold text-text-primary">
                {localize('com_ui_image_details')}
              </h3>
              <div className="mb-4 h-px bg-border-medium"></div>
            </div>

            <div className="space-y-6">
            <div className="space-y-4 sm:space-y-6">
              {/* Prompt Section */}
              <div>
                <h4 className="mb-2 text-sm font-medium text-text-primary">

@@ -157,13 +349,7 @@ export default function DialogImage({ isOpen, onOpenChange, src = '', downloadIm
                <div className="flex items-center justify-between">
                  <span className="text-sm text-text-primary">{localize('com_ui_quality')}:</span>
                  <span
                    className={`rounded px-2 py-1 text-xs font-medium capitalize ${
                      args?.quality === 'high'
                        ? 'bg-green-100 text-green-800'
                        : args?.quality === 'low'
                          ? 'bg-orange-100 text-orange-800'
                          : 'bg-gray-100 text-gray-800'
                    }`}
                    className={`rounded px-2 py-1 text-xs font-medium capitalize ${getQualityStyles(args?.quality || '')}`}
                  >
                    {args?.quality || 'Standard'}
                  </span>

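Extracting `getQualityStyles` replaces the nested ternary that previously lived in the JSX above; behavior is unchanged:

```ts
getQualityStyles('high'); // 'bg-green-100 text-green-800'
getQualityStyles('low');  // 'bg-orange-100 text-orange-800'
getQualityStyles('');     // 'bg-gray-100 text-gray-800' (fallback when quality is unset)
```
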
@@ -6,6 +6,7 @@ import { useUpdateUserPluginsMutation } from 'librechat-data-provider/react-quer
import type { TUpdateUserPlugins } from 'librechat-data-provider';
import { Button, Input, Label } from '~/components/ui';
import { useGetStartupConfig } from '~/data-provider';
import { useAddToolMutation } from '~/data-provider/Tools';
import MCPPanelSkeleton from './MCPPanelSkeleton';
import { useToastContext } from '~/Providers';
import { useLocalize } from '~/hooks';

@@ -17,6 +18,12 @@ interface ServerConfigWithVars {
  };
}

interface AddToolFormData {
  name: string;
  description: string;
  type: 'function' | 'code_interpreter' | 'file_search';
}

export default function MCPPanel() {
  const localize = useLocalize();
  const { showToast } = useToastContext();

@@ -24,6 +31,7 @@ export default function MCPPanel() {
  const [selectedServerNameForEditing, setSelectedServerNameForEditing] = useState<string | null>(
    null,
  );
  const [showAddToolForm, setShowAddToolForm] = useState(true);

  const mcpServerDefinitions = useMemo(() => {
    if (!startupConfig?.mcpServers) {

@@ -87,19 +95,25 @@ export default function MCPPanel() {

  const handleGoBackToList = () => {
    setSelectedServerNameForEditing(null);
    setShowAddToolForm(false);
  };

  const handleShowAddToolForm = () => {
    setShowAddToolForm(true);
    setSelectedServerNameForEditing(null);
  };

  if (startupConfigLoading) {
    return <MCPPanelSkeleton />;
  }

  if (mcpServerDefinitions.length === 0) {
    return (
      <div className="p-4 text-center text-sm text-gray-500">
        {localize('com_sidepanel_mcp_no_servers_with_vars')}
      </div>
    );
  }
  // if (mcpServerDefinitions.length === 0) {
  //   return (
  //     <div className="p-4 text-center text-sm text-gray-500">
  //       {localize('com_sidepanel_mcp_no_servers_with_vars')}
  //     </div>
  //   );
  // }

  if (selectedServerNameForEditing) {
    // Editing View

@@ -138,10 +152,32 @@ export default function MCPPanel() {
        />
      </div>
    );
  } else if (showAddToolForm) {
    // Add Tool Form View
    return (
      <div className="h-auto max-w-full overflow-x-hidden p-3">
        <Button
          variant="outline"
          onClick={handleGoBackToList}
          className="mb-3 flex items-center px-3 py-2 text-sm"
        >
          <ChevronLeft className="mr-1 h-4 w-4" />
          {localize('com_ui_back')}
        </Button>
        <h3 className="mb-3 text-lg font-medium">{localize('com_ui_add_tool')}</h3>
        <AddToolForm onCancel={handleGoBackToList} />
      </div>
    );
  } else {
    // Server List View
    return (
      <div className="h-auto max-w-full overflow-x-hidden p-3">
        <div className="mb-3 flex items-center justify-between">
          <h3 className="text-lg font-medium">{localize('com_ui_mcp_servers')}</h3>
          <Button variant="outline" onClick={handleShowAddToolForm} className="text-sm">
            {localize('com_ui_add_tool')}
          </Button>
        </div>
        <div className="space-y-2">
          {mcpServerDefinitions.map((server) => (
            <Button

@@ -251,3 +287,131 @@ function MCPVariableEditor({ server, onSave, onRevoke, isSubmitting }: MCPVariab
    </form>
  );
}

interface AddToolFormProps {
  onCancel: () => void;
}

function AddToolForm({ onCancel }: AddToolFormProps) {
  const localize = useLocalize();
  const { showToast } = useToastContext();

  const addToolMutation = useAddToolMutation({
    onSuccess: (data) => {
      showToast({
        message: localize('com_ui_tool_added_success', { '0': data.function?.name || 'Unknown' }),
        status: 'success',
      });
      onCancel();
    },
    onError: (error) => {
      console.error('Error adding tool:', error);
      showToast({
        message: localize('com_ui_tool_add_error'),
        status: 'error',
      });
    },
  });

  const {
    control,
    handleSubmit,
    formState: { errors, isDirty },
  } = useForm<AddToolFormData>({
    defaultValues: {
      name: '',
      description: '',
      type: 'function',
    },
  });

  const onFormSubmit = (data: AddToolFormData) => {
    addToolMutation.mutate(data);
  };

  return (
    <form onSubmit={handleSubmit(onFormSubmit)} className="mb-4 mt-2 space-y-4">
      <div className="space-y-2">
        <Label htmlFor="tool-name" className="text-sm font-medium">
          {localize('com_ui_tool_name')}
        </Label>
        <Controller
          name="name"
          control={control}
          rules={{ required: localize('com_ui_tool_name_required') }}
          render={({ field }) => (
            <Input
              id="tool-name"
              type="text"
              {...field}
              placeholder={localize('com_ui_enter_tool_name')}
              className="w-full rounded-md border-gray-300 shadow-sm focus:border-indigo-500 focus:ring-indigo-500 dark:border-gray-600 dark:bg-gray-700 dark:text-white sm:text-sm"
            />
          )}
        />
        {errors.name && <p className="text-xs text-red-500">{errors.name.message}</p>}
      </div>

      <div className="space-y-2">
        <Label htmlFor="tool-description" className="text-sm font-medium">
          {localize('com_ui_description')}
        </Label>
        <Controller
          name="description"
          control={control}
          rules={{ required: localize('com_ui_description_required') }}
          render={({ field }) => (
            <Input
              id="tool-description"
              type="text"
              {...field}
              placeholder={localize('com_ui_enter_description')}
              className="w-full rounded-md border-gray-300 shadow-sm focus:border-indigo-500 focus:ring-indigo-500 dark:border-gray-600 dark:bg-gray-700 dark:text-white sm:text-sm"
            />
          )}
        />
        {errors.description && <p className="text-xs text-red-500">{errors.description.message}</p>}
      </div>

      <div className="space-y-2">
        <Label htmlFor="tool-type" className="text-sm font-medium">
          {localize('com_ui_tool_type')}
        </Label>
        <Controller
          name="type"
          control={control}
          render={({ field }) => (
            <select
              id="tool-type"
              {...field}
              className="w-full rounded-md border-gray-300 shadow-sm focus:border-indigo-500 focus:ring-indigo-500 dark:border-gray-600 dark:bg-gray-700 dark:text-white sm:text-sm"
            >
              <option value="function">{localize('com_ui_function')}</option>
              <option value="code_interpreter">{localize('com_ui_code_interpreter')}</option>
              <option value="file_search">{localize('com_ui_file_search')}</option>
            </select>
          )}
        />
        {errors.type && <p className="text-xs text-red-500">{errors.type.message}</p>}
      </div>

      <div className="flex justify-end gap-2 pt-2">
        <Button
          type="button"
          variant="outline"
          onClick={onCancel}
          disabled={addToolMutation.isLoading}
        >
          {localize('com_ui_cancel')}
        </Button>
        <Button
          type="submit"
          className="bg-green-500 text-white hover:bg-green-600"
          disabled={addToolMutation.isLoading || !isDirty}
        >
          {addToolMutation.isLoading ? localize('com_ui_saving') : localize('com_ui_save')}
        </Button>
      </div>
    </form>
  );
}

@@ -46,6 +46,10 @@ function DynamicInput({
    setInputValue(e, !isNaN(Number(e.target.value)));
  };

  const placeholderText = placeholderCode
    ? localize(placeholder as TranslationKeys) || placeholder
    : placeholder;

  return (
    <div
      className={`flex flex-col items-center justify-start gap-6 ${

@@ -76,11 +80,7 @@ function DynamicInput({
          disabled={readonly}
          value={inputValue ?? defaultValue ?? ''}
          onChange={handleInputChange}
          placeholder={
            placeholderCode
              ? localize(placeholder as TranslationKeys) || placeholder
              : placeholder
          }
          placeholder={placeholderText}
          className={cn(
            'flex h-10 max-h-10 w-full resize-none border-none bg-surface-secondary px-3 py-2',
          )}

@@ -29,7 +29,7 @@ function OptionHover({
    <HoverCardPortal>
      <HoverCardContent side={side} className={`z-[999] w-80 ${className}`} sideOffset={sideOffset}>
        <div className="space-y-2">
          <p className="text-sm text-gray-600 dark:text-gray-300">{text}</p>
          <p className="whitespace-pre-wrap text-sm text-text-secondary">{text}</p>
        </div>
      </HoverCardContent>
    </HoverCardPortal>

@@ -40,3 +40,44 @@ export const useToolCallMutation = <T extends t.ToolId>(
    },
  );
};

/**
 * Interface for creating a new tool
 */
interface CreateToolData {
  name: string;
  description: string;
  type: 'function' | 'code_interpreter' | 'file_search';
  metadata?: Record<string, unknown>;
}

/**
 * Mutation hook for adding a new tool to the system
 * Note: Requires corresponding backend implementation of dataService.createTool
 */
export const useAddToolMutation = (
  // options?:
  // {
  //   onMutate?: (variables: CreateToolData) => void | Promise<unknown>;
  //   onError?: (error: Error, variables: CreateToolData, context: unknown) => void;
  //   onSuccess?: (data: t.Tool, variables: CreateToolData, context: unknown) => void;
  // }
  options?: t.MutationOptions<Record<string, unknown>, CreateToolData>,
): UseMutationResult<Record<string, unknown>, Error, CreateToolData> => {
  const queryClient = useQueryClient();

  return useMutation(
    (toolData: CreateToolData) => {
      return dataService.createTool(toolData);
    },
    {
      onMutate: (variables) => options?.onMutate?.(variables),
      onError: (error, variables, context) => options?.onError?.(error, variables, context),
      onSuccess: (data, variables, context) => {
        // Invalidate tools list to trigger refetch
        queryClient.invalidateQueries([QueryKeys.tools]);
        return options?.onSuccess?.(data, variables, context);
      },
    },
  );
};
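As the doc comment notes, `dataService.createTool` has no backend counterpart yet. A minimal sketch of the client-side method the hook implies, assuming a JSON POST route at `/api/tools` (the route and response shape are assumptions, not part of this diff):

```ts
// Hypothetical data-service method; '/api/tools' is an assumed route.
async function createTool(data: CreateToolData): Promise<Record<string, unknown>> {
  const res = await fetch('/api/tools', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(data),
  });
  if (!res.ok) {
    throw new Error(`createTool failed: ${res.status}`);
  }
  return res.json();
}
```
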

@@ -194,7 +194,7 @@ export const useConversationTagsQuery = (
/**
 * Hook for getting all available tools for Assistants
 */
export const useAvailableToolsQuery = <TData = t.TPlugin[]>(
export const useAvailableToolsQuery = <TData = t.TPlugin[]>( // <-- this one
  endpoint: t.AssistantsEndpoint | EModelEndpoint.agents,
  config?: UseQueryOptions<t.TPlugin[], unknown, TData>,
): QueryObserverResult<TData> => {

84  client/src/hooks/Files/useClientResize.ts  Normal file

@@ -0,0 +1,84 @@
import { mergeFileConfig } from 'librechat-data-provider';
import { useCallback } from 'react';
import { useGetFileConfig } from '~/data-provider';
import {
  resizeImage,
  shouldResizeImage,
  supportsClientResize,
  type ResizeOptions,
  type ResizeResult,
} from '~/utils/imageResize';

/**
 * Hook for client-side image resizing functionality
 * Integrates with LibreChat's file configuration system
 */
export const useClientResize = () => {
  const { data: fileConfig = null } = useGetFileConfig({
    select: (data) => mergeFileConfig(data),
  });

  // Safe access to clientImageResize config with fallbacks
  // eslint-disable-next-line react-hooks/exhaustive-deps
  const config = (fileConfig as any)?.clientImageResize ?? {
    enabled: false,
    maxWidth: 1900,
    maxHeight: 1900,
    quality: 0.92,
  };
  const isEnabled = config?.enabled ?? false;

  /**
   * Resizes an image if client-side resizing is enabled and supported
   * @param file - The image file to resize
   * @param options - Optional resize options to override defaults
   * @returns Promise resolving to either the resized file result or original file
   */
  const resizeImageIfNeeded = useCallback(
    async (
      file: File,
      options?: Partial<ResizeOptions>,
    ): Promise<{ file: File; resized: boolean; result?: ResizeResult }> => {
      // Return original file if resizing is disabled
      if (!isEnabled) {
        return { file, resized: false };
      }

      // Return original file if browser doesn't support resizing
      if (!supportsClientResize()) {
        console.warn('Client-side image resizing not supported in this browser');
        return { file, resized: false };
      }

      // Return original file if it doesn't need resizing
      if (!shouldResizeImage(file)) {
        return { file, resized: false };
      }

      try {
        const resizeOptions: Partial<ResizeOptions> = {
          maxWidth: config?.maxWidth,
          maxHeight: config?.maxHeight,
          quality: config?.quality,
          ...options,
        };

        const result = await resizeImage(file, resizeOptions);
        return { file: result.file, resized: true, result };
      } catch (error) {
        console.warn('Client-side image resizing failed:', error);
        return { file, resized: false };
      }
    },
    [isEnabled, config],
  );

  return {
    isEnabled,
    isSupported: supportsClientResize(),
    config,
    resizeImageIfNeeded,
  };
};

export default useClientResize;
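Typical call site for the hook, mirroring how `useFileHandling` consumes it below (the handler name is illustrative; this must run inside a React component):

```tsx
const { resizeImageIfNeeded, isEnabled, isSupported } = useClientResize();

const handleImageDrop = async (file: File) => {
  // Falls back to the original file when disabled, unsupported, or on error.
  const { file: uploadFile, resized, result } = await resizeImageIfNeeded(file);
  if (resized && result) {
    console.log(`Resized ${result.originalSize} -> ${result.newSize} bytes`);
  }
  // ...hand uploadFile to the upload pipeline
};
```
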
@@ -18,6 +18,7 @@ import useLocalize, { TranslationKeys } from '~/hooks/useLocalize';
import { useChatContext } from '~/Providers/ChatContext';
import { useToastContext } from '~/Providers/ToastContext';
import { logger, validateFiles } from '~/utils';
import useClientResize from './useClientResize';
import { processFileForUpload } from '~/utils/heicConverter';
import { useDelayedUploadToast } from './useDelayedUploadToast';
import useUpdateFiles from './useUpdateFiles';

@@ -41,6 +42,7 @@ const useFileHandling = (params?: UseFileHandling) => {
  const { addFile, replaceFile, updateFileById, deleteFileById } = useUpdateFiles(
    params?.fileSetter ?? setFiles,
  );
  const { resizeImageIfNeeded } = useClientResize();

  const agent_id = params?.additionalMetadata?.agent_id ?? '';
  const assistant_id = params?.additionalMetadata?.assistant_id ?? '';

@@ -298,7 +300,7 @@ const useFileHandling = (params?: UseFileHandling) => {
      }

      // Process file for HEIC conversion if needed
      const processedFile = await processFileForUpload(
      const heicProcessedFile = await processFileForUpload(
        originalFile,
        0.9,
        (conversionProgress) => {

@@ -311,23 +313,50 @@ const useFileHandling = (params?: UseFileHandling) => {
        },
      );

      // If file was converted, update with new file and preview
      if (processedFile !== originalFile) {
      let finalProcessedFile = heicProcessedFile;

      // Apply client-side resizing if available and appropriate
      if (heicProcessedFile.type.startsWith('image/')) {
        try {
          const resizeResult = await resizeImageIfNeeded(heicProcessedFile);
          finalProcessedFile = resizeResult.file;

          // Show toast notification if image was resized
          if (resizeResult.resized && resizeResult.result) {
            const { originalSize, newSize, compressionRatio } = resizeResult.result;
            const originalSizeMB = (originalSize / (1024 * 1024)).toFixed(1);
            const newSizeMB = (newSize / (1024 * 1024)).toFixed(1);
            const savedPercent = Math.round((1 - compressionRatio) * 100);

            showToast({
              message: `Image resized: ${originalSizeMB}MB → ${newSizeMB}MB (${savedPercent}% smaller)`,
              status: 'success',
              duration: 3000,
            });
          }
        } catch (resizeError) {
          console.warn('Image resize failed, using original:', resizeError);
          // Continue with HEIC processed file if resizing fails
        }
      }

      // If file was processed (HEIC converted or resized), update with new file and preview
      if (finalProcessedFile !== originalFile) {
        URL.revokeObjectURL(initialPreview); // Clean up original preview
        const newPreview = URL.createObjectURL(processedFile);
        const newPreview = URL.createObjectURL(finalProcessedFile);

        const updatedExtendedFile: ExtendedFile = {
          ...initialExtendedFile,
          file: processedFile,
          type: processedFile.type,
          file: finalProcessedFile,
          type: finalProcessedFile.type,
          preview: newPreview,
          progress: 0.5, // Conversion complete, ready for upload
          size: processedFile.size,
          progress: 0.5, // Processing complete, ready for upload
          size: finalProcessedFile.size,
        };

        replaceFile(updatedExtendedFile);

        const isImage = processedFile.type.split('/')[0] === 'image';
        const isImage = finalProcessedFile.type.split('/')[0] === 'image';
        if (isImage) {
          loadImage(updatedExtendedFile, newPreview);
          continue;

@@ -335,7 +364,7 @@ const useFileHandling = (params?: UseFileHandling) => {

        await startUpload(updatedExtendedFile);
      } else {
        // File wasn't converted, proceed with original
        // File wasn't processed, proceed with original
        const isImage = originalFile.type.split('/')[0] === 'image';
        const tool_resource =
          initialExtendedFile.tool_resource ?? params?.additionalMetadata?.tool_resource;
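The toast numbers above follow from `compressionRatio = newSize / originalSize` (see `ResizeResult` in imageResize.ts below). A worked example:

```ts
const originalSize = 4.2 * 1024 * 1024; // 4.2 MB source image
const newSize = 1.05 * 1024 * 1024;     // 1.05 MB after resizing
const compressionRatio = newSize / originalSize;               // 0.25
const savedPercent = Math.round((1 - compressionRatio) * 100); // 75
// Toast reads: "Image resized: 4.2MB → 1.1MB (75% smaller)"
```
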
@@ -152,20 +152,20 @@ export default function useSideNavLinks({
    });
  }

  if (
    startupConfig?.mcpServers &&
    Object.values(startupConfig.mcpServers).some(
      (server) => server.customUserVars && Object.keys(server.customUserVars).length > 0,
    )
  ) {
    links.push({
      title: 'com_nav_setting_mcp',
      label: '',
      icon: MCPIcon,
      id: 'mcp-settings',
      Component: MCPPanel,
    });
  }
  // if (
  //   startupConfig?.mcpServers &&
  //   Object.values(startupConfig.mcpServers).some(
  //     (server) => server.customUserVars && Object.keys(server.customUserVars).length > 0,
  //   )
  // ) {
  links.push({
    title: 'com_nav_setting_mcp',
    label: '',
    icon: MCPIcon,
    id: 'mcp-settings',
    Component: MCPPanel,
  });
  // }

  links.push({
    title: 'com_sidepanel_hide_panel',

@@ -1,7 +1,7 @@
import { useRef, useEffect, useCallback, useMemo } from 'react';
import { useRecoilState } from 'recoil';
import { Constants, LocalStorageKeys, EModelEndpoint } from 'librechat-data-provider';
import type { TPlugin, TPluginAuthConfig } from 'librechat-data-provider';
import type { TPlugin } from 'librechat-data-provider';
import { useAvailableToolsQuery } from '~/data-provider';
import useLocalStorage from '~/hooks/useLocalStorageAlt';
import { ephemeralAgentByConvoId } from '~/store';

@@ -24,20 +24,13 @@ interface UseMCPSelectOptions {
  conversationId?: string | null;
}

export interface McpServerInfo {
  name: string;
  pluginKey: string;
  authConfig?: TPluginAuthConfig[];
  authenticated?: boolean;
}

export function useMCPSelect({ conversationId }: UseMCPSelectOptions) {
  const key = conversationId ?? Constants.NEW_CONVO;
  const hasSetFetched = useRef<string | null>(null);
  const [ephemeralAgent, setEphemeralAgent] = useRecoilState(ephemeralAgentByConvoId(key));
  const { data: mcpToolDetails, isFetched } = useAvailableToolsQuery(EModelEndpoint.agents, {
    select: (data: TPlugin[]) => {
      const mcpToolsMap = new Map<string, McpServerInfo>();
      const mcpToolsMap = new Map<string, TPlugin>();
      data.forEach((tool) => {
        const isMCP = tool.pluginKey.includes(Constants.mcp_delimiter);
        if (isMCP && tool.chatMenu !== false) {

@@ -109,13 +102,13 @@ export function useMCPSelect({ conversationId }: UseMCPSelectOptions) {
  }, [mcpToolDetails]);

  return {
    isPinned,
    mcpValues,
    setIsPinned,
    setMCPValues,
    mcpServerNames,
    ephemeralAgent,
    mcpToolDetails,
    setEphemeralAgent,
    isPinned,
    setIsPinned,
  };
}
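The `select` callback above keeps only MCP-backed tools by checking for the server delimiter in `pluginKey`. An illustration (the delimiter literal is an assumption for the example; the real code reads `Constants.mcp_delimiter`):

```ts
const MCP_DELIMITER = '_mcp_'; // assumed value of Constants.mcp_delimiter
const tools = [
  { pluginKey: `search${MCP_DELIMITER}my-server`, chatMenu: true },
  { pluginKey: 'dalle', chatMenu: true }, // not MCP: filtered out
];
const mcpTools = tools.filter(
  (t) => t.pluginKey.includes(MCP_DELIMITER) && t.chatMenu !== false,
);
// -> only the 'search_mcp_my-server' entry survives
```
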

@@ -207,6 +207,8 @@
  "com_endpoint_google_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
  "com_endpoint_google_topk": "Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature).",
  "com_endpoint_google_topp": "Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value.",
  "com_endpoint_google_thinking": "Enables or disables reasoning. This setting is only supported by certain models (2.5 series). For older models, this setting may have no effect.",
  "com_endpoint_google_thinking_budget": "Guides the number of thinking tokens the model uses. The actual amount may exceed or fall below this value depending on the prompt.\n\nThis setting is only supported by certain models (2.5 series). Gemini 2.5 Pro supports 128-32,768 tokens. Gemini 2.5 Flash supports 0-24,576 tokens. Gemini 2.5 Flash Lite supports 512-24,576 tokens.\n\nLeave blank or set to \"-1\" to let the model automatically decide when and how much to think. By default, Gemini 2.5 Flash Lite does not think.",
  "com_endpoint_instructions_assistants": "Override Instructions",
  "com_endpoint_instructions_assistants_placeholder": "Overrides the instructions of the assistant. This is useful for modifying the behavior on a per-run basis.",
  "com_endpoint_max_output_tokens": "Max Output Tokens",

@@ -582,6 +584,7 @@
  "com_ui_auth_url": "Authorization URL",
  "com_ui_authentication": "Authentication",
  "com_ui_authentication_type": "Authentication Type",
  "com_ui_auto": "Auto",
  "com_ui_available_tools": "Available Tools",
  "com_ui_avatar": "Avatar",
  "com_ui_azure": "Azure",

@@ -1054,6 +1057,7 @@
  "com_ui_x_selected": "{{0}} selected",
  "com_ui_yes": "Yes",
  "com_ui_zoom": "Zoom",
  "com_ui_reset_zoom": "Reset Zoom",
  "com_user_message": "You",
  "com_warning_resubmit_unsupported": "Resubmitting the AI message is not supported for this endpoint."
}

@@ -818,14 +818,14 @@ pre {
  max-width: 65ch;
  font-size: var(--markdown-font-size, var(--font-size-base));
  line-height: calc(
    28px * var(--markdown-font-size, var(--font-size-base)) / var(--font-size-base)
    22px * var(--markdown-font-size, var(--font-size-base)) / var(--font-size-base)
  );
}
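The `calc()` keeps line height proportional to the user-selected markdown font size; the change only drops the 28px base to 22px. Evaluated for two settings (TypeScript used for the arithmetic; the CSS variables are the same ones named above):

```ts
const base = 16;                         // --font-size-base in px
const lineHeightAt16 = (22 * 16) / base; // 22px at the default font size
const lineHeightAt20 = (22 * 20) / base; // 27.5px when --markdown-font-size is 20px
```
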

.prose :where([class~='lead']):not(:where([class~='not-prose'] *)) {
  color: var(--tw-prose-lead);
  font-size: 1.25em;
  line-height: 1.6;
  line-height: 1.3;
  margin-bottom: 1.2em;
  margin-top: 1.2em;
}

@@ -853,8 +853,8 @@ pre {
.prose :where(hr):not(:where([class~='not-prose'] *)) {
  border-color: var(--tw-prose-hr);
  border-top-width: 1px;
  margin-bottom: 3em;
  margin-top: 3em;
  margin-bottom: 0.8em;
  margin-top: 0.8em;
}
.prose :where(blockquote):not(:where([class~='not-prose'] *)) {
  border-left-color: var(--tw-prose-quote-borders);

@@ -878,9 +878,9 @@ pre {
  color: var(--tw-prose-headings);
  font-size: 2.25em;
  font-weight: 800;
  line-height: 1.1111111;
  margin-bottom: 0.8888889em;
  margin-top: 0;
  line-height: 1;
  margin-bottom: 0.4em;
  margin-top: 0.6em;
}
.prose :where(h1 strong):not(:where([class~='not-prose'] *)) {
  color: inherit;

@@ -890,9 +890,9 @@ pre {
  color: var(--tw-prose-headings);
  font-size: 1.5em;
  font-weight: 700;
  line-height: 1.3333333;
  margin-bottom: 1em;
  margin-top: 2em;
  line-height: 1.1;
  margin-bottom: 0.4em;
  margin-top: 0.8em;
}
.prose :where(h2 strong):not(:where([class~='not-prose'] *)) {
  color: inherit;

@@ -902,9 +902,9 @@ pre {
  color: var(--tw-prose-headings);
  font-size: 1.25em;
  font-weight: 600;
  line-height: 1.6;
  margin-bottom: 0.6em;
  margin-top: 1.6em;
  line-height: 1.3;
  margin-bottom: 0.3em;
  margin-top: 0.6em;
}
.prose :where(h3 strong):not(:where([class~='not-prose'] *)) {
  color: inherit;

@@ -913,9 +913,9 @@ pre {
.prose :where(h4):not(:where([class~='not-prose'] *)) {
  color: var(--tw-prose-headings);
  font-weight: 600;
  line-height: 1.5;
  margin-bottom: 0.5em;
  margin-top: 1.5em;
  line-height: 1.2;
  margin-bottom: 0.3em;
  margin-top: 0.5em;
}
.prose :where(h4 strong):not(:where([class~='not-prose'] *)) {
  color: inherit;

@@ -932,19 +932,19 @@ pre {
.prose :where(figcaption):not(:where([class~='not-prose'] *)) {
  color: var(--tw-prose-captions);
  font-size: 0.875em;
  line-height: 1.4285714;
  line-height: 1.2;
  margin-top: 0.8571429em;
}
.prose :where(code):not(:where([class~='not-prose'] *)) {
  color: var(--tw-prose-code);
  font-size: 0.875em;
  font-weight: 600;
  background-color: var(--gray-200);
  padding: 0.125rem 0.25rem;
  border-radius: 0.35rem;
}
.prose :where(code):not(:where([class~='not-prose'] *)):before {
  content: '`';
}
.prose :where(code):not(:where([class~='not-prose'] *)):after {
  content: '`';
.dark .prose :where(code):not(:where([class~='not-prose'] *)):not(:where(pre *)) {
  background-color: var(--gray-600);
}
.prose :where(a code):not(:where([class~='not-prose'] *)) {
  color: inherit;

@@ -971,11 +971,11 @@ pre {
}
.prose :where(pre):not(:where([class~='not-prose'] *)) {
  background-color: transparent;
  border-radius: 0.375rem;
  border-radius: 0.75rem;
  color: currentColor;
  font-size: 0.875em;
  font-weight: 400;
  line-height: 1.7142857;
  line-height: 1.4;
  margin: 0;
  overflow-x: auto;
  padding: 0;

@@ -999,7 +999,7 @@ pre {
}
.prose :where(table):not(:where([class~='not-prose'] *)) {
  font-size: 0.875em;
  line-height: 1.7142857;
  line-height: 1.4;
  margin-bottom: 2em;
  margin-top: 2em;
  table-layout: auto;

@@ -1036,14 +1036,14 @@ pre {
  vertical-align: top;
}
.prose {
  --tw-prose-body: #374151;
  --tw-prose-body: #424242;
  --tw-prose-headings: #111827;
  --tw-prose-lead: #4b5563;
  --tw-prose-links: #0066cc;
  --tw-prose-bold: #111827;
  --tw-prose-counters: #6b7280;
  --tw-prose-bullets: #d1d5db;
  --tw-prose-hr: #e5e7eb;
  --tw-prose-hr: #cdcdcd;
  --tw-prose-quotes: #111827;
  --tw-prose-quote-borders: #e5e7eb;
  --tw-prose-captions: #6b7280;

@@ -1059,17 +1059,17 @@ pre {
  --tw-prose-invert-bold: #fff;
  --tw-prose-invert-counters: #9ca3af;
  --tw-prose-invert-bullets: #4b5563;
  --tw-prose-invert-hr: #374151;
  --tw-prose-invert-hr: #424242;
  --tw-prose-invert-quotes: #f3f4f6;
  --tw-prose-invert-quote-borders: #374151;
  --tw-prose-invert-quote-borders: #424242;
  --tw-prose-invert-captions: #9ca3af;
  --tw-prose-invert-code: #fff;
  --tw-prose-invert-pre-code: #d1d5db;
  --tw-prose-invert-pre-bg: rgba(0, 0, 0, 0.5);
  --tw-prose-invert-th-borders: #4b5563;
  --tw-prose-invert-td-borders: #374151;
  --tw-prose-invert-td-borders: #424242;
  font-size: 1rem;
  line-height: 1.75;
  line-height: 1.4;
}
.prose :where(p):not(:where([class~='not-prose'] *)) {
  margin-bottom: 1.25em;

@@ -1112,6 +1112,13 @@ pre {
.prose :where(h4 + *):not(:where([class~='not-prose'] *)) {
  margin-top: 0;
}
/* Ensure symmetrical spacing around hr */
.prose :where(* + hr):not(:where([class~='not-prose'] *)) {
  margin-top: 0.8em;
}
.prose :where(hr + h1, hr + h2, hr + h3, hr + h4):not(:where([class~='not-prose'] *)) {
  margin-top: 0.4em;
}
.prose :where(thead th:first-child):not(:where([class~='not-prose'] *)) {
  padding-left: 0;
}

@@ -1213,6 +1220,14 @@ pre {
.prose-2xl :where(.prose > :last-child):not(:where([class~='not-prose'] *)) {
  margin-bottom: 0;
}
.prose :where(ul > li):has(input[type='checkbox']):not(:where([class~='not-prose'] *)) {
  margin-bottom: 0;
  margin-top: 0;
}
.prose :where(ul > li):has(input[type='checkbox']) p:not(:where([class~='not-prose'] *)) {
  margin-bottom: 0;
  margin-top: 0;
}

code,
pre {

@@ -1484,7 +1499,7 @@ html {
  max-width: none;
  font-size: var(--markdown-font-size, var(--font-size-base));
  line-height: calc(
    28px * var(--markdown-font-size, var(--font-size-base)) / var(--font-size-base)
    22px * var(--markdown-font-size, var(--font-size-base)) / var(--font-size-base)
  );
}

@@ -1496,8 +1511,8 @@ html {
}

.markdown h2 {
  margin-bottom: 1rem;
  margin-top: 2rem;
  margin-bottom: 0.4rem;
  margin-top: 0.8rem;
}

.markdown h3 {

@@ -1507,8 +1522,8 @@ html {

.markdown h3,
.markdown h4 {
  margin-bottom: 0.5rem;
  margin-top: 1rem;
  margin-bottom: 0.3rem;
  margin-top: 0.6rem;
}

.markdown h4 {

@@ -1523,7 +1538,7 @@ html {

.markdown blockquote {
  --tw-border-opacity: 1;
  border-color: rgba(142, 142, 160, var(--tw-border-opacity));
  border-color: var(--gray-400);
  border-left-width: 2px;
  line-height: 1rem;
  padding-left: 1rem;

@@ -1551,6 +1566,7 @@ html {

.markdown th:last-child {
  border-right-width: 1px;
  border-color: #d1d5db;
  border-top-right-radius: 0.375rem;
}

@@ -1751,16 +1767,16 @@ html {
  font-weight: 600;
}
.markdown h2 {
  margin-bottom: 1rem;
  margin-top: 2rem;
  margin-bottom: 0.4rem;
  margin-top: 0.8rem;
}
.markdown h3 {
  font-weight: 600;
}
.markdown h3,
.markdown h4 {
  margin-bottom: 0.5rem;
  margin-top: 1rem;
  margin-bottom: 0.3rem;
  margin-top: 0.6rem;
}
.markdown h4 {
  font-weight: 400;

@@ -1770,45 +1786,63 @@ html {
}
.markdown blockquote {
  --tw-border-opacity: 1;
  border-color: rgba(142, 142, 160, var(--tw-border-opacity));
  border-color: var(--gray-300);
  border-left-width: 2px;
  line-height: 1rem;
  padding-left: 1rem;
}
.dark .markdown blockquote {
  border-color: var(--gray-600);
}
.markdown table {
  --tw-border-spacing-x: 0px;
  --tw-border-spacing-y: 0px;
  border-collapse: separate;
  border-spacing: var(--tw-border-spacing-x) var(--tw-border-spacing-y);
  width: 100%;
  border-color: var(--gray-300);
}
.markdown th {
  background-color: rgba(236, 236, 241, 0.2);
  background-color: var(--gray-100);
  border-bottom-width: 1px;
  border-left-width: 1px;
  border-top-width: 1px;
  border-color: var(--gray-300);
  padding: 0.25rem 0.75rem;
  font-weight: 600;
}
.dark .markdown th {
  border-color: var(--gray-600);
  background-color: var(--gray-600);
}
.markdown th:first-child {
  border-top-left-radius: 0.375rem;
  border-top-left-radius: 0.75rem;
}
.markdown th:last-child {
  border-right-width: 1px;
  border-top-right-radius: 0.375rem;
  border-top-right-radius: 0.75rem;
}
.markdown td {
  border-bottom-width: 1px;
  border-left-width: 1px;
  border-color: var(--gray-300);
  padding: 0.25rem 0.75rem;
}
.markdown td:last-child {
  border-right-width: 1px;
  border-color: var(--gray-300);
}
.dark .markdown td {
  border-color: var(--gray-600);
}
.dark .markdown td:last-child {
  border-color: var(--gray-600);
}
.markdown tbody tr:last-child td:first-child {
  border-bottom-left-radius: 0.375rem;
  border-bottom-left-radius: 0.75rem;
}
.markdown tbody tr:last-child td:last-child {
  border-bottom-right-radius: 0.375rem;
  border-bottom-right-radius: 0.75rem;
}
.markdown a {
  text-decoration-line: underline;

@@ -2011,7 +2045,7 @@ html {

.dark .assistant-item:after {
  --tw-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.25);
  --tw-shadow-colored: inset 0 0 0 1px var(--tw-shadow-color);
  --tw-shadow-colored: inset 0 0 0 0 1px var(--tw-shadow-color);
}

.result-streaming > :not(ol):not(ul):not(pre):last-child:after,

@@ -2248,7 +2282,13 @@ html {
/* Nested unordered lists */
.prose ul ul,
.markdown ul ul {
  list-style-type: circle;
  list-style-type: disc;
}

.prose ul ul > li::marker,
.markdown ul ul > li::marker {
  color: var(--tw-prose-bullets);
  font-size: 0.8em;
}

.prose ul ul ul,

@@ -2256,6 +2296,12 @@ html {
  list-style-type: square;
}

.prose ul ul ul > li::marker,
.markdown ul ul ul > li::marker {
  color: var(--tw-prose-bullets);
  font-size: 0.7em;
}

/* Nested lists */
.prose ol ol,
.prose ul ul,

@@ -2450,7 +2496,7 @@ html {

.message-content {
  font-size: var(--markdown-font-size, var(--font-size-base));
  line-height: 1.75;
  line-height: 1.4;
}

.message-content pre code {

108  client/src/utils/__tests__/imageResize.test.ts  Normal file

@@ -0,0 +1,108 @@
/**
 * Tests for client-side image resizing utility
 */

import { shouldResizeImage, supportsClientResize } from '../imageResize';

// Mock browser APIs for testing
Object.defineProperty(global, 'HTMLCanvasElement', {
  value: function () {
    return {
      getContext: () => ({
        drawImage: jest.fn(),
      }),
      toBlob: jest.fn(),
    };
  },
  writable: true,
});

Object.defineProperty(global, 'FileReader', {
  value: function () {
    return {
      readAsDataURL: jest.fn(),
    };
  },
  writable: true,
});

Object.defineProperty(global, 'Image', {
  value: function () {
    return {};
  },
  writable: true,
});

describe('imageResize utility', () => {
  describe('supportsClientResize', () => {
    it('should return true when all required APIs are available', () => {
      const result = supportsClientResize();
      expect(result).toBe(true);
    });

    it('should return false when HTMLCanvasElement is not available', () => {
      const originalCanvas = global.HTMLCanvasElement;
      // @ts-ignore
      delete global.HTMLCanvasElement;

      const result = supportsClientResize();
      expect(result).toBe(false);

      global.HTMLCanvasElement = originalCanvas;
    });
  });

  describe('shouldResizeImage', () => {
    it('should return true for large image files', () => {
      const largeImageFile = new File([''], 'test.jpg', {
        type: 'image/jpeg',
        lastModified: Date.now(),
      });

      // Mock large file size
      Object.defineProperty(largeImageFile, 'size', {
        value: 100 * 1024 * 1024, // 100MB
        writable: false,
      });

      const result = shouldResizeImage(largeImageFile, 50 * 1024 * 1024); // 50MB limit
      expect(result).toBe(true);
    });

    it('should return false for small image files', () => {
      const smallImageFile = new File([''], 'test.jpg', {
        type: 'image/jpeg',
        lastModified: Date.now(),
      });

      // Mock small file size
      Object.defineProperty(smallImageFile, 'size', {
        value: 1024, // 1KB
        writable: false,
      });

      const result = shouldResizeImage(smallImageFile, 50 * 1024 * 1024); // 50MB limit
      expect(result).toBe(false);
    });

    it('should return false for non-image files', () => {
      const textFile = new File([''], 'test.txt', {
        type: 'text/plain',
        lastModified: Date.now(),
      });

      const result = shouldResizeImage(textFile);
      expect(result).toBe(false);
    });

    it('should return false for GIF files', () => {
      const gifFile = new File([''], 'test.gif', {
        type: 'image/gif',
        lastModified: Date.now(),
      });

      const result = shouldResizeImage(gifFile);
      expect(result).toBe(false);
    });
  });
});

215  client/src/utils/imageResize.ts  Normal file

@@ -0,0 +1,215 @@
/**
 * Client-side image resizing utility for LibreChat
 * Resizes images to prevent backend upload errors while maintaining quality
 */

export interface ResizeOptions {
  maxWidth?: number;
  maxHeight?: number;
  quality?: number;
  format?: 'jpeg' | 'png' | 'webp';
}

export interface ResizeResult {
  file: File;
  originalSize: number;
  newSize: number;
  originalDimensions: { width: number; height: number };
  newDimensions: { width: number; height: number };
  compressionRatio: number;
}

/**
 * Default resize options based on backend 'high' resolution settings
 * Backend 'high' uses maxShortSide=768, maxLongSide=2000
 * We use slightly smaller values to ensure no backend resizing is triggered
 */
const DEFAULT_RESIZE_OPTIONS: ResizeOptions = {
  maxWidth: 1900, // Slightly less than backend maxLongSide=2000
  maxHeight: 1900, // Slightly less than backend maxLongSide=2000
  quality: 0.92, // High quality while reducing file size
  format: 'jpeg', // Most compatible format
};

/**
 * Checks if the browser supports canvas-based image resizing
 */
export function supportsClientResize(): boolean {
  try {
    // Check for required APIs
    if (typeof HTMLCanvasElement === 'undefined') return false;
    if (typeof FileReader === 'undefined') return false;
    if (typeof Image === 'undefined') return false;

    // Test canvas creation
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');

    return !!(ctx && ctx.drawImage && canvas.toBlob);
  } catch {
    return false;
  }
}

/**
 * Calculates new dimensions while maintaining aspect ratio
 */
function calculateDimensions(
  originalWidth: number,
  originalHeight: number,
  maxWidth: number,
  maxHeight: number,
): { width: number; height: number } {
  const { width, height } = { width: originalWidth, height: originalHeight };

  // If image is smaller than max dimensions, don't upscale
  if (width <= maxWidth && height <= maxHeight) {
    return { width, height };
  }

  // Calculate scaling factor
  const widthRatio = maxWidth / width;
  const heightRatio = maxHeight / height;
  const scalingFactor = Math.min(widthRatio, heightRatio);

  return {
    width: Math.round(width * scalingFactor),
    height: Math.round(height * scalingFactor),
  };
}
|
||||
/**
|
||||
* Resizes an image file using canvas
|
||||
*/
|
||||
export function resizeImage(
|
||||
file: File,
|
||||
options: Partial<ResizeOptions> = {},
|
||||
): Promise<ResizeResult> {
|
||||
return new Promise((resolve, reject) => {
|
||||
// Check browser support
|
||||
if (!supportsClientResize()) {
|
||||
reject(new Error('Browser does not support client-side image resizing'));
|
||||
return;
|
||||
}
|
||||
|
||||
// Only process image files
|
||||
if (!file.type.startsWith('image/')) {
|
||||
reject(new Error('File is not an image'));
|
||||
return;
|
||||
}
|
||||
|
||||
const opts = { ...DEFAULT_RESIZE_OPTIONS, ...options };
|
||||
const reader = new FileReader();
|
||||
|
||||
reader.onload = (event) => {
|
||||
const img = new Image();
|
||||
|
||||
img.onload = () => {
|
||||
try {
|
||||
const originalDimensions = { width: img.width, height: img.height };
|
||||
const newDimensions = calculateDimensions(
|
||||
img.width,
|
||||
img.height,
|
||||
opts.maxWidth!,
|
||||
opts.maxHeight!,
|
||||
);
|
||||
|
||||
// If no resizing needed, return original file
|
||||
if (
|
||||
newDimensions.width === originalDimensions.width &&
|
||||
newDimensions.height === originalDimensions.height
|
||||
) {
|
||||
resolve({
|
||||
file,
|
||||
originalSize: file.size,
|
||||
newSize: file.size,
|
||||
originalDimensions,
|
||||
newDimensions,
|
||||
compressionRatio: 1,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Create canvas and resize
|
||||
const canvas = document.createElement('canvas');
|
||||
const ctx = canvas.getContext('2d')!;
|
||||
|
||||
canvas.width = newDimensions.width;
|
||||
canvas.height = newDimensions.height;
|
||||
|
||||
// Use high-quality image smoothing
|
||||
ctx.imageSmoothingEnabled = true;
|
||||
ctx.imageSmoothingQuality = 'high';
|
||||
|
||||
// Draw resized image
|
||||
ctx.drawImage(img, 0, 0, newDimensions.width, newDimensions.height);
|
||||
|
||||
// Convert to blob
|
||||
canvas.toBlob(
|
||||
(blob) => {
|
||||
if (!blob) {
|
||||
reject(new Error('Failed to create blob from canvas'));
|
||||
return;
|
||||
}
|
||||
|
||||
// Create new file with same name but potentially different extension
|
||||
const extension = opts.format === 'jpeg' ? '.jpg' : `.${opts.format}`;
|
||||
const baseName = file.name.replace(/\.[^/.]+$/, '');
|
||||
const newFileName = `${baseName}${extension}`;
|
||||
|
||||
const resizedFile = new File([blob], newFileName, {
|
||||
type: `image/${opts.format}`,
|
||||
lastModified: Date.now(),
|
||||
});
|
||||
|
||||
resolve({
|
||||
file: resizedFile,
|
||||
originalSize: file.size,
|
||||
newSize: resizedFile.size,
|
||||
originalDimensions,
|
||||
newDimensions,
|
||||
compressionRatio: resizedFile.size / file.size,
|
||||
});
|
||||
},
|
||||
`image/${opts.format}`,
|
||||
opts.quality,
|
||||
);
|
||||
} catch (error) {
|
||||
reject(error);
|
||||
}
|
||||
};
|
||||
|
||||
img.onerror = () => reject(new Error('Failed to load image'));
|
||||
img.src = event.target?.result as string;
|
||||
};
|
||||
|
||||
reader.onerror = () => reject(new Error('Failed to read file'));
|
||||
reader.readAsDataURL(file);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if an image should be resized based on size and dimensions
|
||||
*/
|
||||
export function shouldResizeImage(
|
||||
file: File,
|
||||
fileSizeLimit: number = 512 * 1024 * 1024, // 512MB default
|
||||
): boolean {
|
||||
// Don't resize if file is already small
|
||||
if (file.size < fileSizeLimit * 0.1) {
|
||||
// Less than 10% of limit
|
||||
return false;
|
||||
}
|
||||
|
||||
// Don't process non-images
|
||||
if (!file.type.startsWith('image/')) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Don't process GIFs (they might be animated)
|
||||
if (file.type === 'image/gif') {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
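A minimal usage sketch for this utility; the call site below is hypothetical (prepareImageForUpload and its fileSizeLimit parameter are assumptions, only the three imported functions come from the file above):

    import { resizeImage, shouldResizeImage, supportsClientResize } from '~/utils/imageResize';

    // Hypothetical pre-upload step: resize when possible, fail open otherwise.
    async function prepareImageForUpload(file: File, fileSizeLimit: number): Promise<File> {
      if (!supportsClientResize() || !shouldResizeImage(file, fileSizeLimit)) {
        return file; // unsupported browser, small file, non-image, or GIF
      }
      try {
        const result = await resizeImage(file, { maxWidth: 1900, maxHeight: 1900 });
        return result.file; // may be the original file when no downscaling was needed
      } catch {
        return file; // fail open: let the backend handle the original upload
      }
    }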
@@ -13,6 +13,9 @@ cache: true

# Custom interface configuration
interface:
  customWelcome: "Welcome to LibreChat! Enjoy your experience."
  # MCP Servers UI configuration
  mcpServers:
    placeholder: 'MCP Servers'
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
@@ -70,6 +73,8 @@ interface:
  bookmarks: true
  multiConvo: true
  agents: true
  # Temporary chat retention period in hours (default: 720, min: 1, max: 8760)
  # temporaryChatRetention: 1

# Example Cloudflare turnstile (optional)
#turnstile:
@@ -297,6 +302,12 @@ endpoints:
# imageGeneration: # Image Gen settings, either percentage or px
#   percentage: 100
#   px: 1024
# # Client-side image resizing to prevent upload errors
# clientImageResize:
#   enabled: false # Enable/disable client-side image resizing (default: false)
#   maxWidth: 1900 # Maximum width for resized images (default: 1900)
#   maxHeight: 1900 # Maximum height for resized images (default: 1900)
#   quality: 0.92 # JPEG quality for compression (0.0-1.0, default: 0.92)
# # See the Custom Configuration Guide for more information on Assistants Config:
# # https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint
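One way the new clientImageResize block might be consumed on the client, as a sketch; only the four option names come from the yaml above, and the surrounding plumbing is an assumption:

    // Assumed shape mirroring the commented librechat.yaml keys above.
    interface ClientImageResizeConfig {
      enabled?: boolean;
      maxWidth?: number;
      maxHeight?: number;
      quality?: number;
    }

    function toResizeOptions(config?: ClientImageResizeConfig) {
      if (!config?.enabled) {
        return undefined; // feature is off by default (enabled: false)
      }
      return {
        maxWidth: config.maxWidth ?? 1900,
        maxHeight: config.maxHeight ?? 1900,
        quality: config.quality ?? 0.92,
        format: 'jpeg' as const, // default format used by the resize utility
      };
    }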
57 package-lock.json generated
@@ -64,14 +64,13 @@
    "@langchain/google-genai": "^0.2.13",
    "@langchain/google-vertexai": "^0.2.13",
    "@langchain/textsplitters": "^0.1.0",
    "@librechat/agents": "^2.4.41",
    "@librechat/agents": "^2.4.46",
    "@librechat/api": "*",
    "@librechat/data-schemas": "*",
    "@node-saml/passport-saml": "^5.0.0",
    "@waylaidwanderer/fetch-event-source": "^3.0.1",
    "axios": "^1.8.2",
    "bcryptjs": "^2.4.3",
    "cohere-ai": "^7.9.1",
    "compression": "^1.7.4",
    "connect-redis": "^7.1.0",
    "cookie": "^0.7.2",
@@ -19127,9 +19126,9 @@
      }
    },
    "node_modules/@langchain/langgraph": {
      "version": "0.3.4",
      "resolved": "https://registry.npmjs.org/@langchain/langgraph/-/langgraph-0.3.4.tgz",
      "integrity": "sha512-Vuja8Qtu3Zjx7k4fK7Cnw+p8gtvIRPciWp9btPhAs3aUo6aBgOJOZVcK5Ii3mHfEHK/aQmRElR0x/u/YwykOrg==",
      "version": "0.3.5",
      "resolved": "https://registry.npmjs.org/@langchain/langgraph/-/langgraph-0.3.5.tgz",
      "integrity": "sha512-7astlgnp6BdMQJqmr+cbDgR10FYWNCaDLnbfEDHpqhKCCajU59m5snOdl4Vtu5UM6V2k3lgatNqWoflBtxhIyg==",
      "license": "MIT",
      "dependencies": {
        "@langchain/langgraph-checkpoint": "~0.0.18",
@@ -19437,9 +19436,9 @@
      }
    },
    "node_modules/@librechat/agents": {
      "version": "2.4.41",
      "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.41.tgz",
      "integrity": "sha512-kYmdk5WVRp0qZxTx6BuGCs4l0Ir9iBLLx4ZY4/1wxr80al5/vq3P8wbgGdKMeO2qTu4ZaT4RyWRQYWBg5HDkUQ==",
      "version": "2.4.46",
      "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.46.tgz",
      "integrity": "sha512-zR27U19/WGF3HN64oBbiaFgjjWHaF7BjYzRFWzQKEkk+iEzCe59IpuEZUizQ54YcY02nhhh6S3MNUjhAJwMYVA==",
      "license": "MIT",
      "dependencies": {
        "@langchain/anthropic": "^0.3.23",
@@ -19989,9 +19988,9 @@
      }
    },
    "node_modules/@librechat/agents/node_modules/@langchain/openai": {
      "version": "0.5.14",
      "resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.14.tgz",
      "integrity": "sha512-0GEj5K/qi1MRuZ4nE7NvyI4jTG+RSewLZqsExUwRukWdeqmkPNHGrogTa5ZDt7eaJxAaY7EgLC5ZnvCM3L1oug==",
      "version": "0.5.15",
      "resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.15.tgz",
      "integrity": "sha512-ANadEHyAj5sufQpz+SOPpKbyoMcTLhnh8/d+afbSPUqWsIMPpEFX3HoSY3nrBPG6l4NQQNG5P5oHb4SdC8+YIg==",
      "license": "MIT",
      "dependencies": {
        "js-tiktoken": "^1.0.12",
@@ -20028,9 +20027,9 @@
      }
    },
    "node_modules/@librechat/agents/node_modules/openai": {
      "version": "5.5.1",
      "resolved": "https://registry.npmjs.org/openai/-/openai-5.5.1.tgz",
      "integrity": "sha512-5i19097mGotHA1eFsM6Tjd/tJ8uo9sa5Ysv4Q6bKJ2vtN6rc0MzMrUefXnLXYAJcmMQrC1Efhj0AvfIkXrQamw==",
      "version": "5.7.0",
      "resolved": "https://registry.npmjs.org/openai/-/openai-5.7.0.tgz",
      "integrity": "sha512-zXWawZl6J/P5Wz57/nKzVT3kJQZvogfuyuNVCdEp4/XU2UNrjL7SsuNpWAyLZbo6HVymwmnfno9toVzBhelygA==",
      "license": "Apache-2.0",
      "bin": {
        "openai": "bin/cli"
@@ -28101,6 +28100,8 @@
      "version": "7.9.1",
      "resolved": "https://registry.npmjs.org/cohere-ai/-/cohere-ai-7.9.1.tgz",
      "integrity": "sha512-shMz0Bs3p6/Nw5Yi+6Wc9tZ7DCGTtEnf1eAcuesnlyeKoFuZ7+bzeiHkt5E8SvTgAHxN1GCP3UkIoW85QhHKTA==",
      "optional": true,
      "peer": true,
      "dependencies": {
        "form-data": "4.0.0",
        "js-base64": "3.7.2",
@@ -28113,6 +28114,8 @@
      "version": "6.11.2",
      "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.2.tgz",
      "integrity": "sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==",
      "optional": true,
      "peer": true,
      "dependencies": {
        "side-channel": "^1.0.4"
      },
@@ -34441,7 +34444,9 @@
    "node_modules/js-base64": {
      "version": "3.7.2",
      "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-3.7.2.tgz",
      "integrity": "sha512-NnRs6dsyqUXejqk/yv2aiXlAvOs56sLkX6nUdeaNezI5LFFLlsZjOThmwnrcwh5ZZRwZlCMnVAY3CvhIhoVEKQ=="
      "integrity": "sha512-NnRs6dsyqUXejqk/yv2aiXlAvOs56sLkX6nUdeaNezI5LFFLlsZjOThmwnrcwh5ZZRwZlCMnVAY3CvhIhoVEKQ==",
      "optional": true,
      "peer": true
    },
    "node_modules/js-cookie": {
      "version": "3.0.5",
@@ -44808,7 +44813,9 @@
    "node_modules/url-join": {
      "version": "4.0.1",
      "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
      "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA=="
      "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
      "optional": true,
      "peer": true
    },
    "node_modules/url-parse": {
      "version": "1.5.10",
@@ -46527,7 +46534,7 @@
    },
    "packages/api": {
      "name": "@librechat/api",
      "version": "1.2.4",
      "version": "1.2.5",
      "license": "ISC",
      "devDependencies": {
        "@babel/preset-env": "^7.21.5",
@@ -46559,13 +46566,14 @@
        "typescript": "^5.0.4"
      },
      "peerDependencies": {
        "@librechat/agents": "^2.4.41",
        "@librechat/agents": "^2.4.46",
        "@librechat/data-schemas": "*",
        "@modelcontextprotocol/sdk": "^1.12.3",
        "axios": "^1.8.2",
        "diff": "^7.0.0",
        "eventsource": "^3.0.2",
        "express": "^4.21.2",
        "js-yaml": "^4.1.0",
        "keyv": "^5.3.2",
        "librechat-data-provider": "*",
        "node-fetch": "2.7.0",
@@ -46575,10 +46583,11 @@
      }
    },
    "packages/api/node_modules/brace-expansion": {
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
      "version": "2.0.2",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "balanced-match": "^1.0.0"
      }
@@ -46832,9 +46841,9 @@
      }
    },
    "packages/data-schemas/node_modules/brace-expansion": {
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
      "version": "2.0.2",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
@@ -1,6 +1,6 @@
{
  "name": "@librechat/api",
  "version": "1.2.4",
  "version": "1.2.5",
  "type": "commonjs",
  "description": "MCP services for LibreChat",
  "main": "dist/index.js",
@@ -69,13 +69,14 @@
    "registry": "https://registry.npmjs.org/"
  },
  "peerDependencies": {
    "@librechat/agents": "^2.4.41",
    "@librechat/agents": "^2.4.46",
    "@librechat/data-schemas": "*",
    "@modelcontextprotocol/sdk": "^1.12.3",
    "axios": "^1.8.2",
    "diff": "^7.0.0",
    "eventsource": "^3.0.2",
    "express": "^4.21.2",
    "js-yaml": "^4.1.0",
    "keyv": "^5.3.2",
    "librechat-data-provider": "*",
    "node-fetch": "2.7.0",
@@ -46,7 +46,10 @@ export async function createRun({
  customHandlers?: Record<GraphEvents, EventHandler>;
}): Promise<Run<IState>> {
  const provider =
    providerEndpointMap[agent.provider as keyof typeof providerEndpointMap] ?? agent.provider;
    (providerEndpointMap[
      agent.provider as keyof typeof providerEndpointMap
    ] as unknown as Providers) ?? agent.provider;

  const llmConfig: t.RunLLMConfig = Object.assign(
    {
      provider,
@@ -66,7 +69,9 @@ export async function createRun({
  }

  let reasoningKey: 'reasoning_content' | 'reasoning' | undefined;
  if (
  if (provider === Providers.GOOGLE) {
    reasoningKey = 'reasoning';
  } else if (
    llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
    (agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
  ) {
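The net effect of this hunk: Google runs now stream their reasoning under the 'reasoning' key rather than the default 'reasoning_content', joining the OpenRouter case that follows it. A condensed restatement of the selection logic; the OpenRouter branch body sits outside this hunk, so its assignment to 'reasoning' is an assumption here:

    type ReasoningKey = 'reasoning_content' | 'reasoning';

    // Condensed sketch, not the full createRun body.
    function selectReasoningKey(
      provider: string,
      baseURL?: string,
      endpoint?: string,
    ): ReasoningKey | undefined {
      if (provider === 'google') {
        return 'reasoning';
      }
      if (baseURL?.includes('openrouter') || endpoint?.toLowerCase().includes('openrouter')) {
        return 'reasoning'; // assumed: continuation of the truncated else-if above
      }
      return undefined;
    }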
1 packages/api/src/endpoints/google/index.ts Normal file
@@ -0,0 +1 @@
export * from './llm';
@@ -1,13 +1,15 @@
const { Providers } = require('@librechat/agents');
const { AuthKeys } = require('librechat-data-provider');
const { isEnabled } = require('~/server/utils');
import { Providers } from '@librechat/agents';
import { googleSettings, AuthKeys } from 'librechat-data-provider';
import type { GoogleClientOptions, VertexAIClientOptions } from '@librechat/agents';
import type * as t from '~/types';
import { isEnabled } from '~/utils';

function getThresholdMapping(model) {
function getThresholdMapping(model: string) {
  const gemini1Pattern = /gemini-(1\.0|1\.5|pro$|1\.0-pro|1\.5-pro|1\.5-flash-001)/;
  const restrictedPattern = /(gemini-(1\.5-flash-8b|2\.0|exp)|learnlm)/;

  if (gemini1Pattern.test(model)) {
    return (value) => {
    return (value: string) => {
      if (value === 'OFF') {
        return 'BLOCK_NONE';
      }
@@ -16,7 +18,7 @@ function getThresholdMapping(model) {
  }

  if (restrictedPattern.test(model)) {
    return (value) => {
    return (value: string) => {
      if (value === 'OFF' || value === 'HARM_BLOCK_THRESHOLD_UNSPECIFIED') {
        return 'BLOCK_NONE';
      }
@@ -24,19 +26,16 @@ function getThresholdMapping(model) {
    };
  }

  return (value) => value;
  return (value: string) => value;
}

/**
 *
 * @param {string} model
 * @returns {Array<{category: string, threshold: string}> | undefined}
 */
function getSafetySettings(model) {
export function getSafetySettings(
  model?: string,
): Array<{ category: string; threshold: string }> | undefined {
  if (isEnabled(process.env.GOOGLE_EXCLUDE_SAFETY_SETTINGS)) {
    return undefined;
  }
  const mapThreshold = getThresholdMapping(model);
  const mapThreshold = getThresholdMapping(model ?? '');

  return [
    {
@@ -74,24 +73,27 @@ function getSafetySettings(model) {
 * Replicates core logic from GoogleClient's constructor and setOptions, plus client determination.
 * Returns an object with the provider label and the final options that would be passed to createLLM.
 *
 * @param {string | object} credentials - Either a JSON string or an object containing Google keys
 * @param {object} [options={}] - The same shape as the "GoogleClient" constructor options
 * @param credentials - Either a JSON string or an object containing Google keys
 * @param options - The same shape as the "GoogleClient" constructor options
 */

function getLLMConfig(credentials, options = {}) {
  // 1. Parse credentials
  let creds = {};
export function getGoogleConfig(
  credentials: string | t.GoogleCredentials | undefined,
  options: t.GoogleConfigOptions = {},
) {
  let creds: t.GoogleCredentials = {};
  if (typeof credentials === 'string') {
    try {
      creds = JSON.parse(credentials);
    } catch (err) {
      throw new Error(`Error parsing string credentials: ${err.message}`);
    } catch (err: unknown) {
      throw new Error(
        `Error parsing string credentials: ${err instanceof Error ? err.message : 'Unknown error'}`,
      );
    }
  } else if (credentials && typeof credentials === 'object') {
    creds = credentials;
  }

  // Extract from credentials
  const serviceKeyRaw = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
  const serviceKey =
    typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : (serviceKeyRaw ?? {});
@@ -102,9 +104,15 @@ function getLLMConfig(credentials, options = {}) {
  const reverseProxyUrl = options.reverseProxyUrl;
  const authHeader = options.authHeader;

  /** @type {GoogleClientOptions | VertexAIClientOptions} */
  let llmConfig = {
    ...(options.modelOptions || {}),
  const {
    thinking = googleSettings.thinking.default,
    thinkingBudget = googleSettings.thinkingBudget.default,
    ...modelOptions
  } = options.modelOptions || {};

  const llmConfig: GoogleClientOptions | VertexAIClientOptions = {
    ...(modelOptions || {}),
    model: modelOptions?.model ?? '',
    maxRetries: 2,
  };

@@ -121,16 +129,30 @@ function getLLMConfig(credentials, options = {}) {

  // If we have a GCP project => Vertex AI
  if (project_id && provider === Providers.VERTEXAI) {
    /** @type {VertexAIClientOptions['authOptions']} */
    llmConfig.authOptions = {
    (llmConfig as VertexAIClientOptions).authOptions = {
      credentials: { ...serviceKey },
      projectId: project_id,
    };
    llmConfig.location = process.env.GOOGLE_LOC || 'us-central1';
    (llmConfig as VertexAIClientOptions).location = process.env.GOOGLE_LOC || 'us-central1';
  } else if (apiKey && provider === Providers.GOOGLE) {
    llmConfig.apiKey = apiKey;
  }

  const shouldEnableThinking =
    thinking && thinkingBudget != null && (thinkingBudget > 0 || thinkingBudget === -1);

  if (shouldEnableThinking && provider === Providers.GOOGLE) {
    (llmConfig as GoogleClientOptions).thinkingConfig = {
      thinkingBudget: thinking ? thinkingBudget : googleSettings.thinkingBudget.default,
      includeThoughts: Boolean(thinking),
    };
  } else if (shouldEnableThinking && provider === Providers.VERTEXAI) {
    (llmConfig as VertexAIClientOptions).thinkingBudget = thinking
      ? thinkingBudget
      : googleSettings.thinkingBudget.default;
    (llmConfig as VertexAIClientOptions).includeThoughts = Boolean(thinking);
  }

  /*
  let legacyOptions = {};
  // Filter out any "examples" that are empty
@@ -152,11 +174,11 @@ function getLLMConfig(credentials, options = {}) {
  */

  if (reverseProxyUrl) {
    llmConfig.baseUrl = reverseProxyUrl;
    (llmConfig as GoogleClientOptions).baseUrl = reverseProxyUrl;
  }

  if (authHeader) {
    llmConfig.customHeaders = {
    (llmConfig as GoogleClientOptions).customHeaders = {
      Authorization: `Bearer ${apiKey}`,
    };
  }
@@ -169,8 +191,3 @@ function getLLMConfig(credentials, options = {}) {
    llmConfig,
  };
}

module.exports = {
  getLLMConfig,
  getSafetySettings,
};
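A hedged usage sketch of the converted helper; the credential values are placeholders, the key name assumes AuthKeys.GOOGLE_API_KEY resolves to 'GOOGLE_API_KEY', and the destructured return shape follows the doc comment above (provider label plus final options):

    import { getGoogleConfig } from '~/endpoints/google';

    // Gemini API (AI Studio) path: an API key and no GCP project selects the Google provider.
    const { provider, llmConfig } = getGoogleConfig(
      { GOOGLE_API_KEY: 'user-api-key' }, // placeholder credentials
      {
        modelOptions: { model: 'gemini-2.5-flash', thinking: true, thinkingBudget: 1024 },
      },
    );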
@@ -1 +1,2 @@
export * from './google';
export * from './openai';
@@ -1,18 +1,14 @@
import {
  ErrorTypes,
  EModelEndpoint,
  resolveHeaders,
  mapModelToAzureConfig,
} from 'librechat-data-provider';
import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
import type {
  LLMConfigOptions,
  UserKeyValues,
  InitializeOpenAIOptionsParams,
  OpenAIOptionsResult,
  OpenAIConfigOptions,
  InitializeOpenAIOptionsParams,
} from '~/types';
import { createHandleLLMNewToken } from '~/utils/generators';
import { getAzureCredentials } from '~/utils/azure';
import { isUserProvided } from '~/utils/common';
import { resolveHeaders } from '~/utils/env';
import { getOpenAIConfig } from './llm';

/**
@@ -68,7 +64,7 @@ export const initializeOpenAI = async ({
    ? userValues?.baseURL
    : baseURLOptions[endpoint as keyof typeof baseURLOptions];

  const clientOptions: LLMConfigOptions = {
  const clientOptions: OpenAIConfigOptions = {
    proxy: PROXY ?? undefined,
    reverseProxyUrl: baseURL || undefined,
    streaming: true,
@@ -91,7 +87,10 @@ export const initializeOpenAI = async ({
    });

    clientOptions.reverseProxyUrl = configBaseURL ?? clientOptions.reverseProxyUrl;
    clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
    clientOptions.headers = resolveHeaders(
      { ...headers, ...(clientOptions.headers ?? {}) },
      req.user,
    );

    const groupName = modelGroupMap[modelName || '']?.group;
    if (groupName && groupMap[groupName]) {
@@ -136,7 +135,7 @@ export const initializeOpenAI = async ({
    user: req.user.id,
  };

  const finalClientOptions: LLMConfigOptions = {
  const finalClientOptions: OpenAIConfigOptions = {
    ...clientOptions,
    modelOptions,
  };
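The substantive change here is that resolveHeaders now receives req.user, which allows user-specific placeholders in configured endpoint headers to be filled in per request. A minimal sketch of that substitution; the real resolveHeaders lives in ~/utils/env and may differ, and the placeholder name reflects LibreChat's documented header templating rather than this diff:

    type HeaderUser = { id: string };

    // Sketch of user-aware header resolution, not the actual implementation.
    function resolveHeadersSketch(headers: Record<string, string>, user?: HeaderUser) {
      const out: Record<string, string> = {};
      for (const [key, value] of Object.entries(headers)) {
        out[key] = user ? value.replaceAll('{{LIBRECHAT_USER_ID}}', user.id) : value;
      }
      return out;
    }

    // resolveHeadersSketch({ 'X-User': '{{LIBRECHAT_USER_ID}}' }, { id: 'abc' })
    //   -> { 'X-User': 'abc' }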
@@ -13,7 +13,7 @@ import { isEnabled } from '~/utils/common';
 */
export function getOpenAIConfig(
  apiKey: string,
  options: t.LLMConfigOptions = {},
  options: t.OpenAIConfigOptions = {},
  endpoint?: string | null,
): t.LLMConfigResult {
  const {
@@ -353,7 +353,11 @@ export const uploadMistralOCR = async (context: OCRContext): Promise<MistralOCRU
      documentType,
    });

    // Process result
    if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
      throw new Error(
        'No OCR result returned from service, may be down or the file is not supported.',
      );
    }
    const { text, images } = processOCRResult(ocrResult);

    return {
@@ -364,7 +368,7 @@ export const uploadMistralOCR = async (context: OCRContext): Promise<MistralOCRU
      images,
    };
  } catch (error) {
    throw createOCRError(error, 'Error uploading document to Mistral OCR API');
    throw createOCRError(error, 'Error uploading document to Mistral OCR API:');
  }
};

@@ -401,6 +405,12 @@ export const uploadAzureMistralOCR = async (
      documentType,
    });

    if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
      throw new Error(
        'No OCR result returned from service, may be down or the file is not supported.',
      );
    }

    const { text, images } = processOCRResult(ocrResult);

    return {
@@ -411,6 +421,6 @@ export const uploadAzureMistralOCR = async (
      images,
    };
  } catch (error) {
    throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API');
    throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API:');
  }
};
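Both upload paths now fail fast instead of handing an empty result to processOCRResult; a missing result, a missing pages array, and an empty pages array are all treated the same. The guard in isolation, as a sketch with a simplified result type:

    // Simplified shape; the real OCR result type carries more fields.
    interface OCRResultLike {
      pages?: unknown[];
    }

    function assertHasPages(ocrResult?: OCRResultLike | null): void {
      if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
        throw new Error(
          'No OCR result returned from service, may be down or the file is not supported.',
        );
      }
    }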
@@ -2,6 +2,7 @@
export * from './mcp/manager';
export * from './mcp/oauth';
export * from './mcp/auth';
export * from './mcp/add';
/* Utilities */
export * from './mcp/utils';
export * from './utils';
81 packages/api/src/mcp/add.ts Normal file
@@ -0,0 +1,81 @@
import { Request, Response } from 'express';
import { logger } from '@librechat/data-schemas';

interface CreateToolRequest extends Request {
  body: {
    name: string;
    description: string;
    type: 'function' | 'code_interpreter' | 'file_search';
    metadata?: Record<string, unknown>;
  };
  user?: {
    id: string;
  };
}

/**
 * Add a new tool to the system
 * @route POST /agents/tools/add
 * @param {object} req.body - Request body containing tool data
 * @param {string} req.body.name - Tool name
 * @param {string} req.body.description - Tool description
 * @param {string} req.body.type - Tool type (function, code_interpreter, file_search)
 * @param {object} [req.body.metadata] - Optional metadata
 * @returns {object} Created tool object
 */
export const addTool = async (req: CreateToolRequest, res: Response) => {
  try {
    const { name, description, type, metadata } = req.body;

    // Log the incoming request for development
    logger.info(
      'Add Tool Request:' +
        JSON.stringify({
          name,
          description,
          type,
          metadata,
          userId: req.user?.id,
        }),
    );

    // Validate required fields
    if (!name || !description || !type) {
      return res.status(400).json({
        error: 'Missing required fields: name, description, and type are required',
      });
    }

    // Validate tool type
    const validTypes = ['function', 'code_interpreter', 'file_search'];
    if (!validTypes.includes(type)) {
      return res.status(400).json({
        error: `Invalid tool type. Must be one of: ${validTypes.join(', ')}`,
      });
    }

    // For now, return a mock successful response
    // TODO: Implement actual tool creation logic
    const mockTool = {
      id: `tool-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
      type,
      function: {
        name,
        description,
      },
      metadata: metadata || {},
      created_at: new Date().toISOString(),
      updated_at: new Date().toISOString(),
    };

    logger.info('Tool created successfully:' + JSON.stringify(mockTool));

    res.status(201).json(mockTool);
  } catch (error) {
    logger.error('Error adding tool:', error);
    res.status(500).json({
      error: 'Internal server error while adding tool',
      message: error instanceof Error ? error.message : 'Unknown error',
    });
  }
};
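A sketch of exercising the new handler; the route comes from the JSDoc above, the response is the mock tool until the TODO is implemented, and the field values here are illustrative:

    // Example client call for POST /agents/tools/add
    async function createExampleTool() {
      const res = await fetch('/agents/tools/add', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          name: 'lookup_weather', // required
          description: 'Fetch a forecast', // required
          type: 'function', // one of: function, code_interpreter, file_search
          metadata: { source: 'example' }, // optional
        }),
      });
      return res.json(); // 201 with { id, type, function: { name, description }, metadata, ... }
    }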
@@ -2,7 +2,7 @@ import { logger } from '@librechat/data-schemas';
import { CallToolResultSchema, ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import type { RequestOptions } from '@modelcontextprotocol/sdk/shared/protocol.js';
import type { OAuthClientInformation } from '@modelcontextprotocol/sdk/shared/auth.js';
import type { JsonSchemaType, MCPOptions, TUser } from 'librechat-data-provider';
import type { JsonSchemaType, TUser } from 'librechat-data-provider';
import type { TokenMethods } from '@librechat/data-schemas';
import type { FlowStateManager } from '~/flow/manager';
import type { MCPOAuthTokens, MCPOAuthFlowMetadata } from './oauth/types';
@@ -13,6 +13,7 @@ import { MCPOAuthHandler } from './oauth/handler';
import { MCPTokenStorage } from './oauth/tokens';
import { formatToolContent } from './parsers';
import { MCPConnection } from './connection';
import { processMCPEnv } from '~/utils/env';

export class MCPManager {
  private static instance: MCPManager | null = null;
@@ -24,11 +25,6 @@ export class MCPManager {
  private userLastActivity: Map<string, number> = new Map();
  private readonly USER_CONNECTION_IDLE_TIMEOUT = 15 * 60 * 1000; // 15 minutes (TODO: make configurable)
  private mcpConfigs: t.MCPServers = {};
  private processMCPEnv?: (
    obj: MCPOptions,
    user?: TUser,
    customUserVars?: Record<string, string>,
  ) => MCPOptions; // Store the processing function
  /** Store MCP server instructions */
  private serverInstructions: Map<string, string> = new Map();

@@ -46,14 +42,11 @@ export class MCPManager {
    mcpServers,
    flowManager,
    tokenMethods,
    processMCPEnv,
  }: {
    mcpServers: t.MCPServers;
    flowManager: FlowStateManager<MCPOAuthTokens | null>;
    tokenMethods?: TokenMethods;
    processMCPEnv?: (obj: MCPOptions) => MCPOptions;
  }): Promise<void> {
    this.processMCPEnv = processMCPEnv; // Store the function
    this.mcpConfigs = mcpServers;

    if (!flowManager) {
@@ -68,7 +61,7 @@ export class MCPManager {
    const connectionResults = await Promise.allSettled(
      entries.map(async ([serverName, _config], i) => {
        /** Process env for app-level connections */
        const config = this.processMCPEnv ? this.processMCPEnv(_config) : _config;
        const config = processMCPEnv(_config);

        /** Existing tokens for system-level connections */
        let tokens: MCPOAuthTokens | null = null;
@@ -444,9 +437,7 @@ export class MCPManager {
      );
    }

    if (this.processMCPEnv) {
      config = { ...(this.processMCPEnv(config, user, customUserVars) ?? {}) };
    }
    config = { ...(processMCPEnv(config, user, customUserVars) ?? {}) };
    /** If no in-memory tokens, tokens from persistent storage */
    let tokens: MCPOAuthTokens | null = null;
    if (tokenMethods?.findToken) {
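The refactor removes the injected callback entirely: MCPManager now imports processMCPEnv from ~/utils/env and applies it itself, so initialization callers no longer pass it in. A before/after sketch of the call site; the method and caller names are assumptions, only the parameter list comes from the hunk above:

    // Before: env processing was injected by the caller.
    // await manager.initializeMCP({ mcpServers, flowManager, tokenMethods, processMCPEnv });

    // After: only data and collaborators are passed; env processing is internal.
    // await manager.initializeMCP({ mcpServers, flowManager, tokenMethods });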
@@ -1,10 +1,10 @@
import type { TUser } from 'librechat-data-provider';
import {
  StreamableHTTPOptionsSchema,
  StdioOptionsSchema,
  processMCPEnv,
  MCPOptions,
} from '../src/mcp';
  StdioOptionsSchema,
  StreamableHTTPOptionsSchema,
} from 'librechat-data-provider';
import type { TUser } from 'librechat-data-provider';
import { processMCPEnv } from '~/utils/env';

// Helper function to create test user objects
function createTestUser(
@@ -7,6 +7,7 @@ const RECOGNIZED_PROVIDERS = new Set([
  'xai',
  'deepseek',
  'ollama',
  'bedrock',
]);
const CONTENT_ARRAY_PROVIDERS = new Set(['google', 'anthropic', 'openai']);
24 packages/api/src/types/google.ts Normal file
@@ -0,0 +1,24 @@
import { z } from 'zod';
import { AuthKeys, googleBaseSchema } from 'librechat-data-provider';

export type GoogleParameters = z.infer<typeof googleBaseSchema>;

export type GoogleCredentials = {
  [AuthKeys.GOOGLE_SERVICE_KEY]?: string;
  [AuthKeys.GOOGLE_API_KEY]?: string;
};

/**
 * Configuration options for the getLLMConfig function
 */
export interface GoogleConfigOptions {
  modelOptions?: Partial<GoogleParameters>;
  reverseProxyUrl?: string;
  defaultQuery?: Record<string, string | undefined>;
  headers?: Record<string, string>;
  proxy?: string;
  streaming?: boolean;
  authHeader?: boolean;
  addParams?: Record<string, unknown>;
  dropParams?: string[];
}
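A sample options object satisfying the new interface; every field is optional and the values below are illustrative only:

    import type { GoogleConfigOptions } from '~/types';

    const options: GoogleConfigOptions = {
      modelOptions: { model: 'gemini-2.5-flash' }, // Partial<GoogleParameters>
      reverseProxyUrl: 'https://proxy.example.com/v1',
      headers: { 'x-request-source': 'demo' },
      streaming: true,
      authHeader: true,
      dropParams: ['topK'], // parameter name is illustrative
    };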
@@ -1,5 +1,6 @@
export * from './azure';
export * from './events';
export * from './google';
export * from './mistral';
export * from './openai';
export * from './run';
@@ -9,7 +9,7 @@ export type OpenAIParameters = z.infer<typeof openAISchema>;
/**
 * Configuration options for the getLLMConfig function
 */
export interface LLMConfigOptions {
export interface OpenAIConfigOptions {
  modelOptions?: Partial<OpenAIParameters>;
  reverseProxyUrl?: string;
  defaultQuery?: Record<string, string | undefined>;
Some files were not shown because too many files have changed in this diff.