const { EModelEndpoint } = require('librechat-data-provider');
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
const {
  getOpenAIModels,
  getAnthropicModels,
  getGoogleModels,
  getBedrockModels,
} = require('~/server/services/ModelService');
const { logger } = require('~/config');
/**
 * Resolves a model-fetching promise, logging and substituting an empty
 * list on failure so one failed endpoint cannot reject the whole batch.
 * @param {Promise<string[]> | string[]} fetching - Pending model fetch (or a plain value).
 * @param {string} message - Message to log alongside the error.
 * @returns {Promise<string[]>} The fetched models, or `[]` on error.
 */
const fetchWithFallback = (fetching, message) =>
  Promise.resolve(fetching).catch((error) => {
    logger.error(message, error);
    return [];
  });

/**
 * Loads the default models for the application.
 * @async
 * @function
 * @param {Express.Request} req - The Express request object; `req.user.id` is
 *   forwarded to user-scoped fetches (OpenAI, Anthropic, Azure, Plugins).
 * @returns {Promise<Object<string, string[]>>} Map of endpoint key to its model list.
 * @throws {Error} If the batch itself fails (individual endpoint failures
 *   degrade to an empty list rather than throwing).
 */
async function loadDefaultModels(req) {
  try {
    // All endpoints are fetched in parallel; each call is individually
    // guarded so a single endpoint outage yields `[]` for that endpoint only.
    const [
      openAI,
      anthropic,
      azureOpenAI,
      gptPlugins,
      assistants,
      azureAssistants,
      google,
      bedrock,
    ] = await Promise.all([
      fetchWithFallback(getOpenAIModels({ user: req.user.id }), 'Error fetching OpenAI models:'),
      fetchWithFallback(
        getAnthropicModels({ user: req.user.id }),
        'Error fetching Anthropic models:',
      ),
      fetchWithFallback(
        getOpenAIModels({ user: req.user.id, azure: true }),
        'Error fetching Azure OpenAI models:',
      ),
      fetchWithFallback(
        getOpenAIModels({ user: req.user.id, azure: useAzurePlugins, plugins: true }),
        'Error fetching Plugin models:',
      ),
      fetchWithFallback(
        getOpenAIModels({ assistants: true }),
        'Error fetching OpenAI Assistants API models:',
      ),
      fetchWithFallback(
        getOpenAIModels({ azureAssistants: true }),
        'Error fetching Azure OpenAI Assistants API models:',
      ),
      fetchWithFallback(getGoogleModels(), 'Error getting Google models:'),
      fetchWithFallback(getBedrockModels(), 'Error getting Bedrock models:'),
    ]);

    return {
      [EModelEndpoint.openAI]: openAI,
      // Agents reuse the OpenAI model list.
      [EModelEndpoint.agents]: openAI,
      [EModelEndpoint.google]: google,
      [EModelEndpoint.anthropic]: anthropic,
      [EModelEndpoint.gptPlugins]: gptPlugins,
      [EModelEndpoint.azureOpenAI]: azureOpenAI,
      [EModelEndpoint.assistants]: assistants,
      [EModelEndpoint.azureAssistants]: azureAssistants,
      [EModelEndpoint.bedrock]: bedrock,
    };
  } catch (error) {
    logger.error('Error fetching default models:', error);
    // Rethrow with `cause` so the original stack is preserved for callers.
    throw new Error(`Failed to load default models: ${error.message}`, { cause: error });
  }
}

module.exports = loadDefaultModels;