Compare commits
92 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
94764c9c2a | ||
|
|
e9c981c202 | ||
|
|
bec1d245bd | ||
|
|
131cb6cddb | ||
|
|
a2b6e9a6a8 | ||
|
|
428fd5bed8 | ||
|
|
9cacf76c10 | ||
|
|
7b8036a369 | ||
|
|
d56817850c | ||
|
|
f88a0685f7 | ||
|
|
ae51e6153f | ||
|
|
745eef2eb0 | ||
|
|
1f8520cdad | ||
|
|
dae2805d27 | ||
|
|
ba2e95db04 | ||
|
|
2a6e000217 | ||
|
|
32281d1b8d | ||
|
|
369b1f4eba | ||
|
|
777d64088b | ||
|
|
d59a3f20cb | ||
|
|
8959576d75 | ||
|
|
dd8bc39001 | ||
|
|
4898f7489b | ||
|
|
c9c77d6fdf | ||
|
|
4dc86c4c18 | ||
|
|
6ae807c404 | ||
|
|
b5353e2640 | ||
|
|
f4f1199a55 | ||
|
|
b6028a3434 | ||
|
|
abef8c02c1 | ||
|
|
19af2b06ce | ||
|
|
2f7658e39f | ||
|
|
d3138c79fc | ||
|
|
1e49b7ecb1 | ||
|
|
3b865fbc59 | ||
|
|
afd894553c | ||
|
|
df485e5bfe | ||
|
|
77252bafc1 | ||
|
|
bd1d5e991d | ||
|
|
18c4883ae0 | ||
|
|
197307d514 | ||
|
|
130356654c | ||
|
|
8f9f09698b | ||
|
|
5da833e066 | ||
|
|
b64273957a | ||
|
|
4148c6d219 | ||
|
|
e9d68e3bef | ||
|
|
bbe690cc4b | ||
|
|
a1ad471d87 | ||
|
|
c319d709f3 | ||
|
|
6943f1c2c7 | ||
|
|
e38483a8b9 | ||
|
|
2a2e6d9991 | ||
|
|
deb1472aa5 | ||
|
|
8aa58ea240 | ||
|
|
3a112a344d | ||
|
|
712be248be | ||
|
|
530f9d303f | ||
|
|
ad29d25396 | ||
|
|
1ef53a41f0 | ||
|
|
0246f164b0 | ||
|
|
514f625b8f | ||
|
|
39ac8d3858 | ||
|
|
15987abe0a | ||
|
|
dd19323280 | ||
|
|
af47a68632 | ||
|
|
9303ea2f57 | ||
|
|
20dde44512 | ||
|
|
732a0b8029 | ||
|
|
50374f7539 | ||
|
|
1a21eb5bae | ||
|
|
1a5144be76 | ||
|
|
e5336039fc | ||
|
|
637bb6bc11 | ||
|
|
1b999108e4 | ||
|
|
3e5c5a828d | ||
|
|
e3bf674cb7 | ||
|
|
c7e57cd3a2 | ||
|
|
9e931229e2 | ||
|
|
981d009508 | ||
|
|
f5672ddcf8 | ||
|
|
747e087cf5 | ||
|
|
c17c1488ca | ||
|
|
47e5493744 | ||
|
|
13627c7f4f | ||
|
|
9e15747455 | ||
|
|
ce6490109f | ||
|
|
f0e2639269 | ||
|
|
cf3889d8e4 | ||
|
|
6efb5bd88e | ||
|
|
a64342f515 | ||
|
|
9eefa3e24c |
36
.env.example
36
.env.example
@@ -32,7 +32,7 @@ OPENAI_API_KEY="user_provided"
|
||||
# Identify the available models, separated by commas *without spaces*.
|
||||
# The first will be default.
|
||||
# Leave it blank to use internal settings.
|
||||
OPENAI_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613
|
||||
# OPENAI_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613
|
||||
|
||||
# Reverse proxy settings for OpenAI:
|
||||
# https://github.com/waylaidwanderer/node-chatgpt-api#using-a-reverse-proxy
|
||||
@@ -107,6 +107,16 @@ CHATGPT_MODELS=text-davinci-002-render-sha,gpt-4
|
||||
# By default it will use the node-chatgpt-api recommended proxy, (it's a third party server)
|
||||
# CHATGPT_REVERSE_PROXY=<YOUR REVERSE PROXY>
|
||||
|
||||
##########################
|
||||
# Anthropic Endpoint:
|
||||
##########################
|
||||
# Access key from https://console.anthropic.com/
|
||||
# Leave it blank to disable this feature.
|
||||
# Set to "user_provided" to allow the user to provide their API key from the UI.
|
||||
# Note that access to claude-1 may potentially become unavailable with the release of claude-2.
|
||||
ANTHROPIC_API_KEY="user_provided"
|
||||
ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2
|
||||
|
||||
#############################
|
||||
# Plugins:
|
||||
#############################
|
||||
@@ -114,7 +124,7 @@ CHATGPT_MODELS=text-davinci-002-render-sha,gpt-4
|
||||
# Identify the available models, separated by commas *without spaces*.
|
||||
# The first will be default.
|
||||
# Leave it blank to use internal settings.
|
||||
PLUGIN_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
|
||||
# PLUGIN_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
|
||||
|
||||
# For securely storing credentials, you need a fixed key and IV. You can set them here for prod and dev environments
|
||||
# If you don't set them, the app will crash on startup.
|
||||
@@ -164,6 +174,9 @@ PROXY=
|
||||
# The easiest setup for this is through docker-compose, which takes care of it for you.
|
||||
SEARCH=true
|
||||
|
||||
# HIGHLY RECOMMENDED: Disable anonymized telemetry analytics for MeiliSearch for absolute privacy.
|
||||
MEILI_NO_ANALYTICS=true
|
||||
|
||||
# REQUIRED FOR SEARCH: MeiliSearch Host, mainly for the API server to connect to the search server.
|
||||
# Replace '0.0.0.0' with 'meilisearch' if serving MeiliSearch with docker-compose.
|
||||
MEILI_HOST=http://0.0.0.0:7700
|
||||
@@ -187,6 +200,9 @@ MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
|
||||
# Allow Public Registration
|
||||
ALLOW_REGISTRATION=true
|
||||
|
||||
# Allow Social Registration
|
||||
ALLOW_SOCIAL_LOGIN=false
|
||||
|
||||
# JWT Secrets
|
||||
JWT_SECRET=secret
|
||||
JWT_REFRESH_SECRET=secret
|
||||
@@ -219,12 +235,20 @@ OPENID_IMAGE_URL=
|
||||
SESSION_EXPIRY=(1000 * 60 * 60 * 24) * 7
|
||||
|
||||
# Github:
|
||||
# Get the Client ID and Secret from your Github Application
|
||||
# Get the Client ID and Secret from your Discord Application
|
||||
# Add your Discord Client ID and Client Secret here:
|
||||
|
||||
GITHUB_CLIENT_ID=your_client_id
|
||||
GITHUB_CLIENT_SECRET=your_client_secret
|
||||
GITHUB_CALLBACK_URL=/oauth/github/callback # this should be the same for everyone
|
||||
|
||||
# Discord:
|
||||
# Get the Client ID and Secret from your Discord Application
|
||||
# Add your Github Client ID and Client Secret here:
|
||||
|
||||
GITHUB_CLIENT_ID=
|
||||
GITHUB_CLIENT_SECRET=
|
||||
GITHUB_CALLBACK_URL=/oauth/github/callback
|
||||
DISCORD_CLIENT_ID=your_client_id
|
||||
DISCORD_CLIENT_SECRET=your_client_secret
|
||||
DISCORD_CALLBACK_URL=/oauth/discord/callback # this should be the same for everyone
|
||||
|
||||
###########################
|
||||
# Application Domains
|
||||
|
||||
63
.eslintrc.js
63
.eslintrc.js
@@ -4,24 +4,25 @@ module.exports = {
|
||||
es2021: true,
|
||||
node: true,
|
||||
commonjs: true,
|
||||
es6: true
|
||||
es6: true,
|
||||
},
|
||||
extends: [
|
||||
'eslint:recommended',
|
||||
'plugin:react/recommended',
|
||||
'plugin:react-hooks/recommended',
|
||||
'plugin:jest/recommended',
|
||||
'prettier'
|
||||
'prettier',
|
||||
],
|
||||
ignorePatterns: ['client/dist/**/*', 'client/public/**/*', 'e2e/playwright-report/**/*'],
|
||||
parser: '@typescript-eslint/parser',
|
||||
parserOptions: {
|
||||
ecmaVersion: 'latest',
|
||||
sourceType: 'module',
|
||||
ecmaFeatures: {
|
||||
jsx: true
|
||||
}
|
||||
jsx: true,
|
||||
},
|
||||
},
|
||||
plugins: ['react', 'react-hooks', '@typescript-eslint'],
|
||||
plugins: ['react', 'react-hooks', '@typescript-eslint', 'import'],
|
||||
rules: {
|
||||
'react/react-in-jsx-scope': 'off',
|
||||
'@typescript-eslint/ban-ts-comment': ['error', { 'ts-ignore': 'allow' }],
|
||||
@@ -32,13 +33,17 @@ module.exports = {
|
||||
code: 120,
|
||||
ignoreStrings: true,
|
||||
ignoreTemplateLiterals: true,
|
||||
ignoreComments: true
|
||||
}
|
||||
ignoreComments: true,
|
||||
},
|
||||
],
|
||||
'import/no-cycle': 'error',
|
||||
'linebreak-style': 0,
|
||||
'object-curly-spacing': ['error', 'always'],
|
||||
curly: ['error', 'all'],
|
||||
semi: ['error', 'always'],
|
||||
'no-trailing-spaces': 'error',
|
||||
'no-multiple-empty-lines': ['error', { 'max': 1 }],
|
||||
'object-curly-spacing': ['error', 'always'],
|
||||
'no-multiple-empty-lines': ['error', { max: 1 }],
|
||||
'comma-dangle': ['error', 'always-multiline'],
|
||||
// "arrow-parens": [2, "as-needed", { requireForBlockBody: true }],
|
||||
// 'no-plusplus': ['error', { allowForLoopAfterthoughts: true }],
|
||||
'no-console': 'off',
|
||||
@@ -49,7 +54,7 @@ module.exports = {
|
||||
'no-restricted-syntax': 'off',
|
||||
'react/prop-types': ['off'],
|
||||
'react/display-name': ['off'],
|
||||
'quotes': ['error', 'single'],
|
||||
quotes: ['error', 'single'],
|
||||
},
|
||||
overrides: [
|
||||
{
|
||||
@@ -57,14 +62,14 @@ module.exports = {
|
||||
rules: {
|
||||
'no-unused-vars': 'off', // off because it conflicts with '@typescript-eslint/no-unused-vars'
|
||||
'react/display-name': 'off',
|
||||
'@typescript-eslint/no-unused-vars': 'warn'
|
||||
}
|
||||
'@typescript-eslint/no-unused-vars': 'warn',
|
||||
},
|
||||
},
|
||||
{
|
||||
files: ['rollup.config.js', '.eslintrc.js', 'jest.config.js'],
|
||||
env: {
|
||||
node: true,
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
files: [
|
||||
@@ -76,42 +81,30 @@ module.exports = {
|
||||
'**/*.spec.jsx',
|
||||
'**/*.spec.ts',
|
||||
'**/*.spec.tsx',
|
||||
'setupTests.js'
|
||||
'setupTests.js',
|
||||
],
|
||||
env: {
|
||||
jest: true,
|
||||
node: true
|
||||
node: true,
|
||||
},
|
||||
rules: {
|
||||
'react/display-name': 'off',
|
||||
'react/prop-types': 'off',
|
||||
'react/no-unescaped-entities': 'off'
|
||||
}
|
||||
'react/no-unescaped-entities': 'off',
|
||||
},
|
||||
},
|
||||
{
|
||||
files: '**/*.+(ts)',
|
||||
parser: '@typescript-eslint/parser',
|
||||
parserOptions: {
|
||||
project: './client/tsconfig.json'
|
||||
project: './client/tsconfig.json',
|
||||
},
|
||||
plugins: ['@typescript-eslint/eslint-plugin', 'jest'],
|
||||
extends: [
|
||||
'plugin:@typescript-eslint/eslint-recommended',
|
||||
'plugin:@typescript-eslint/recommended'
|
||||
]
|
||||
'plugin:@typescript-eslint/recommended',
|
||||
],
|
||||
},
|
||||
{
|
||||
files: './packages/data-provider/**/*.ts',
|
||||
overrides: [
|
||||
{
|
||||
files: '**/*.ts',
|
||||
parser: '@typescript-eslint/parser',
|
||||
parserOptions: {
|
||||
project: './packages/data-provider/tsconfig.json'
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
settings: {
|
||||
react: {
|
||||
@@ -119,7 +112,7 @@ module.exports = {
|
||||
// default to "createReactClass"
|
||||
pragma: 'React', // Pragma to use, default to "React"
|
||||
fragment: 'Fragment', // Fragment to use (may be a property of <pragma>), default to "Fragment"
|
||||
version: 'detect' // React version. "detect" automatically picks the version you have installed.
|
||||
}
|
||||
}
|
||||
version: 'detect', // React version. "detect" automatically picks the version you have installed.
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
6
.github/dependabot.yml
vendored
6
.github/dependabot.yml
vendored
@@ -7,7 +7,7 @@ version: 2
|
||||
updates:
|
||||
- package-ecosystem: "npm" # See documentation for possible values
|
||||
directory: "/api" # Location of package manifests
|
||||
target-branch: "develop"
|
||||
target-branch: "dev"
|
||||
versioning-strategy: increase-if-necessary
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
@@ -20,7 +20,7 @@ updates:
|
||||
include: "scope"
|
||||
- package-ecosystem: "npm" # See documentation for possible values
|
||||
directory: "/client" # Location of package manifests
|
||||
target-branch: "develop"
|
||||
target-branch: "dev"
|
||||
versioning-strategy: increase-if-necessary
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
@@ -33,7 +33,7 @@ updates:
|
||||
include: "scope"
|
||||
- package-ecosystem: "npm" # See documentation for possible values
|
||||
directory: "/" # Location of package manifests
|
||||
target-branch: "develop"
|
||||
target-branch: "dev"
|
||||
versioning-strategy: increase-if-necessary
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
|
||||
5
.github/workflows/backend-review.yml
vendored
5
.github/workflows/backend-review.yml
vendored
@@ -37,3 +37,8 @@ jobs:
|
||||
|
||||
- name: Run unit tests
|
||||
run: cd api && npm run test:ci
|
||||
|
||||
- name: Run linters
|
||||
uses: wearerequired/lint-action@v2
|
||||
with:
|
||||
eslint: true
|
||||
38
.github/workflows/build.yml
vendored
Normal file
38
.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
name: Linux_Container_Workflow
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
RUNNER_VERSION: 2.293.0
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# checkout the repo
|
||||
- name: 'Checkout GitHub Action'
|
||||
uses: actions/checkout@main
|
||||
|
||||
- name: 'Login via Azure CLI'
|
||||
uses: azure/login@v1
|
||||
with:
|
||||
creds: ${{ secrets.AZURE_CREDENTIALS }}
|
||||
|
||||
- name: 'Build GitHub Runner container image'
|
||||
uses: azure/docker-login@v1
|
||||
with:
|
||||
login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }}
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
- run: |
|
||||
docker build --build-arg RUNNER_VERSION=${{ env.RUNNER_VERSION }} -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }} .
|
||||
|
||||
- name: 'Push container image to ACR'
|
||||
uses: azure/docker-login@v1
|
||||
with:
|
||||
login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }}
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
- run: |
|
||||
docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }}
|
||||
38
.github/workflows/deploy.yml
vendored
Normal file
38
.github/workflows/deploy.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
name: Deploy_GHRunner_Linux_ACI
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
RUNNER_VERSION: 2.293.0
|
||||
ACI_RESOURCE_GROUP: 'Demo-ACI-GitHub-Runners-RG'
|
||||
ACI_NAME: 'gh-runner-linux-01'
|
||||
DNS_NAME_LABEL: 'gh-lin-01'
|
||||
GH_OWNER: ${{ github.repository_owner }}
|
||||
GH_REPOSITORY: 'LibreChat' #Change here to deploy self hosted runner ACI to another repo.
|
||||
|
||||
jobs:
|
||||
deploy-gh-runner-aci:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# checkout the repo
|
||||
- name: 'Checkout GitHub Action'
|
||||
uses: actions/checkout@main
|
||||
|
||||
- name: 'Login via Azure CLI'
|
||||
uses: azure/login@v1
|
||||
with:
|
||||
creds: ${{ secrets.AZURE_CREDENTIALS }}
|
||||
|
||||
- name: 'Deploy to Azure Container Instances'
|
||||
uses: 'azure/aci-deploy@v1'
|
||||
with:
|
||||
resource-group: ${{ env.ACI_RESOURCE_GROUP }}
|
||||
image: ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }}
|
||||
registry-login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }}
|
||||
registry-username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
registry-password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
name: ${{ env.ACI_NAME }}
|
||||
dns-name-label: ${{ env.DNS_NAME_LABEL }}
|
||||
environment-variables: GH_TOKEN=${{ secrets.PAT_TOKEN }} GH_OWNER=${{ env.GH_OWNER }} GH_REPOSITORY=${{ env.GH_REPOSITORY }}
|
||||
location: 'eastus'
|
||||
51
.github/workflows/dev-images.yml
vendored
Normal file
51
.github/workflows/dev-images.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
name: Docker Dev Images Build
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'api/**'
|
||||
- 'client/**'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
# Check out the repository
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
# Set up Docker
|
||||
- name: Set up Docker
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
# Log in to GitHub Container Registry
|
||||
- name: Log in to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# Build Docker images
|
||||
- name: Build Docker images
|
||||
run: |
|
||||
cp .env.example .env
|
||||
docker build -f Dockerfile.multi --target api-build -t librechat-dev-api .
|
||||
docker build -f Dockerfile -t librechat-dev .
|
||||
|
||||
# Tag and push the images to GitHub Container Registry
|
||||
- name: Tag and push images
|
||||
run: |
|
||||
docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
|
||||
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
|
||||
docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
|
||||
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
|
||||
|
||||
docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
|
||||
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
|
||||
docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
|
||||
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
|
||||
@@ -1,11 +1,12 @@
|
||||
module.exports = {
|
||||
plugins: ['prettier-plugin-tailwindcss'],
|
||||
printWidth: 100,
|
||||
useTabs: false,
|
||||
tabWidth: 2,
|
||||
useTabs: false,
|
||||
semi: true,
|
||||
singleQuote: true,
|
||||
// bracketSpacing: false,
|
||||
trailingComma: 'none',
|
||||
trailingComma: 'all',
|
||||
arrowParens: 'always',
|
||||
embeddedLanguageFormatting: 'auto',
|
||||
insertPragma: false,
|
||||
|
||||
32
Dockerfile.multi
Normal file
32
Dockerfile.multi
Normal file
@@ -0,0 +1,32 @@
|
||||
# Build API, Client and Data Provider
|
||||
FROM node:19-alpine AS base
|
||||
|
||||
WORKDIR /app
|
||||
COPY config/loader.js ./config/
|
||||
RUN npm install dotenv
|
||||
|
||||
WORKDIR /app/api
|
||||
COPY api/package*.json ./
|
||||
COPY api/ ./
|
||||
RUN npm install
|
||||
|
||||
# React client build
|
||||
FROM base AS client-build
|
||||
WORKDIR /app/client
|
||||
COPY ./client/ ./
|
||||
|
||||
RUN npm install
|
||||
ENV NODE_OPTIONS="--max-old-space-size=2048"
|
||||
RUN npm run build
|
||||
|
||||
# Node API setup
|
||||
FROM base AS api-build
|
||||
COPY --from=client-build /app/client/dist /app/client/dist
|
||||
EXPOSE 3080
|
||||
ENV HOST=0.0.0.0
|
||||
CMD ["node", "server/index.js"]
|
||||
|
||||
# Nginx setup
|
||||
FROM nginx:1.21.1-alpine AS prod-stage
|
||||
COPY ./client/nginx.conf /etc/nginx/conf.d/default.conf
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
28
README.md
28
README.md
@@ -31,12 +31,15 @@ LibreChat brings together the future of assistant AIs with the revolutionary tec
|
||||
|
||||
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
|
||||
|
||||
https://github.com/danny-avila/LibreChat/assets/110412045/c1eb0c0f-41f6-4335-b982-84b278b53d59
|
||||
<!-- https://github.com/danny-avila/LibreChat/assets/110412045/c1eb0c0f-41f6-4335-b982-84b278b53d59 -->
|
||||
|
||||
[](https://youtu.be/pNIOs1ovsXw)
|
||||
Click on the thumbnail to open the video☝️
|
||||
|
||||
# Features
|
||||
- Response streaming identical to ChatGPT through server-sent events
|
||||
- UI from original ChatGPT, including Dark mode
|
||||
- AI model selection (through 5 endpoints: OpenAI API, BingAI, ChatGPT Browser, PaLM2, Plugins)
|
||||
- AI model selection: OpenAI API, BingAI, ChatGPT Browser, PaLM2, Anthropic (Claude), Plugins
|
||||
- Create, Save, & Share custom presets - [More info on prompt presets here](https://github.com/danny-avila/LibreChat/releases/tag/v0.3.0)
|
||||
- Edit and Resubmit messages with conversation branching
|
||||
- Search all messages/conversations - [More info here](https://github.com/danny-avila/LibreChat/releases/tag/v0.1.0)
|
||||
@@ -44,7 +47,9 @@ https://github.com/danny-avila/LibreChat/assets/110412045/c1eb0c0f-41f6-4335-b98
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ [Breaking Changes as of v0.5.0](docs/general_info/breaking_changes.md#v050) ⚠️
|
||||
## ⚠️ [Breaking Changes](docs/general_info/breaking_changes.md) ⚠️
|
||||
**Applies to [v0.5.4](docs/general_info/breaking_changes.md#v054) & [v0.5.5](docs/general_info/breaking_changes.md#v055)**
|
||||
|
||||
**Please read this before updating from a previous version**
|
||||
|
||||
---
|
||||
@@ -59,12 +64,15 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
|
||||
<details open>
|
||||
<summary><strong>Getting Started</strong></summary>
|
||||
|
||||
* [Docker Install](docs/install/docker_install.md)
|
||||
* [Linux Install](docs/install/linux_install.md)
|
||||
* [Mac Install](docs/install/mac_install.md)
|
||||
* [Windows Install](docs/install/windows_install.md)
|
||||
* [APIs and Tokens](docs/install/apis_and_tokens.md)
|
||||
* [User Auth System](docs/install/user_auth_system.md)
|
||||
* Installation
|
||||
* [Docker Install🐳](docs/install/docker_install.md)
|
||||
* [Linux Install🐧](docs/install/linux_install.md)
|
||||
* [Mac Install🍎](docs/install/mac_install.md)
|
||||
* [Windows Install💙](docs/install/windows_install.md)
|
||||
* Configuration
|
||||
* [APIs and Tokens](docs/install/apis_and_tokens.md)
|
||||
* [User Auth System](docs/install/user_auth_system.md)
|
||||
* [Online MongoDB Database](docs/install/mongodb.md)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
@@ -85,6 +93,7 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
|
||||
* [Stable Diffusion](docs/features/plugins/stable_diffusion.md)
|
||||
* [Wolfram](docs/features/plugins/wolfram.md)
|
||||
* [Make Your Own Plugin](docs/features/plugins/make_your_own.md)
|
||||
* [Using official ChatGPT Plugins](docs/features/plugins/chatgpt_plugins_openapi.md)
|
||||
|
||||
* [Proxy](docs/features/proxy.md)
|
||||
* [Bing Jailbreak](docs/features/bing_jailbreak.md)
|
||||
@@ -98,6 +107,7 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
|
||||
* [Linode](docs/deployment/linode.md)
|
||||
* [Cloudflare](docs/deployment/cloudflare.md)
|
||||
* [Ngrok](docs/deployment/ngrok.md)
|
||||
* [Render](docs/deployment/render.md)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
@@ -14,11 +14,11 @@ const askBing = async ({
|
||||
invocationId,
|
||||
toneStyle,
|
||||
token,
|
||||
onProgress
|
||||
onProgress,
|
||||
}) => {
|
||||
const { BingAIClient } = await import('@waylaidwanderer/chatgpt-api');
|
||||
const store = {
|
||||
store: new KeyvFile({ filename: './data/cache.json' })
|
||||
store: new KeyvFile({ filename: './data/cache.json' }),
|
||||
};
|
||||
|
||||
const bingAIClient = new BingAIClient({
|
||||
@@ -30,7 +30,7 @@ const askBing = async ({
|
||||
debug: false,
|
||||
cache: store,
|
||||
host: process.env.BINGAI_HOST || null,
|
||||
proxy: process.env.PROXY || null
|
||||
proxy: process.env.PROXY || null,
|
||||
});
|
||||
|
||||
let options = {};
|
||||
@@ -39,23 +39,43 @@ const askBing = async ({
|
||||
jailbreakConversationId = false;
|
||||
}
|
||||
|
||||
if (jailbreak)
|
||||
if (jailbreak) {
|
||||
options = {
|
||||
jailbreakConversationId: jailbreakConversationId || jailbreak,
|
||||
context,
|
||||
systemMessage,
|
||||
parentMessageId,
|
||||
toneStyle,
|
||||
onProgress
|
||||
onProgress,
|
||||
clientOptions: {
|
||||
features: {
|
||||
genImage: {
|
||||
server: {
|
||||
enable: true,
|
||||
type: 'markdown_list',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
else {
|
||||
} else {
|
||||
options = {
|
||||
conversationId,
|
||||
context,
|
||||
systemMessage,
|
||||
parentMessageId,
|
||||
toneStyle,
|
||||
onProgress
|
||||
onProgress,
|
||||
clientOptions: {
|
||||
features: {
|
||||
genImage: {
|
||||
server: {
|
||||
enable: true,
|
||||
type: 'markdown_list',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
// don't give those parameters for new conversation
|
||||
|
||||
@@ -10,11 +10,11 @@ const browserClient = async ({
|
||||
onProgress,
|
||||
onEventMessage,
|
||||
abortController,
|
||||
userId
|
||||
userId,
|
||||
}) => {
|
||||
const { ChatGPTBrowserClient } = await import('@waylaidwanderer/chatgpt-api');
|
||||
const store = {
|
||||
store: new KeyvFile({ filename: './data/cache.json' })
|
||||
store: new KeyvFile({ filename: './data/cache.json' }),
|
||||
};
|
||||
|
||||
const clientOptions = {
|
||||
@@ -27,7 +27,7 @@ const browserClient = async ({
|
||||
model: model,
|
||||
debug: false,
|
||||
proxy: process.env.PROXY || null,
|
||||
user: userId
|
||||
user: userId,
|
||||
};
|
||||
|
||||
const client = new ChatGPTBrowserClient(clientOptions, store);
|
||||
|
||||
324
api/app/clients/AnthropicClient.js
Normal file
324
api/app/clients/AnthropicClient.js
Normal file
@@ -0,0 +1,324 @@
|
||||
const Keyv = require('keyv');
|
||||
// const { Agent, ProxyAgent } = require('undici');
|
||||
const BaseClient = require('./BaseClient');
|
||||
const {
|
||||
encoding_for_model: encodingForModel,
|
||||
get_encoding: getEncoding,
|
||||
} = require('@dqbd/tiktoken');
|
||||
const Anthropic = require('@anthropic-ai/sdk');
|
||||
|
||||
const HUMAN_PROMPT = '\n\nHuman:';
|
||||
const AI_PROMPT = '\n\nAssistant:';
|
||||
|
||||
const tokenizersCache = {};
|
||||
|
||||
class AnthropicClient extends BaseClient {
|
||||
constructor(apiKey, options = {}, cacheOptions = {}) {
|
||||
super(apiKey, options, cacheOptions);
|
||||
cacheOptions.namespace = cacheOptions.namespace || 'anthropic';
|
||||
this.conversationsCache = new Keyv(cacheOptions);
|
||||
this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
|
||||
this.sender = 'Anthropic';
|
||||
this.userLabel = HUMAN_PROMPT;
|
||||
this.assistantLabel = AI_PROMPT;
|
||||
this.setOptions(options);
|
||||
}
|
||||
|
||||
setOptions(options) {
|
||||
if (this.options && !this.options.replaceOptions) {
|
||||
// nested options aren't spread properly, so we need to do this manually
|
||||
this.options.modelOptions = {
|
||||
...this.options.modelOptions,
|
||||
...options.modelOptions,
|
||||
};
|
||||
delete options.modelOptions;
|
||||
// now we can merge options
|
||||
this.options = {
|
||||
...this.options,
|
||||
...options,
|
||||
};
|
||||
} else {
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
const modelOptions = this.options.modelOptions || {};
|
||||
this.modelOptions = {
|
||||
...modelOptions,
|
||||
// set some good defaults (check for undefined in some cases because they may be 0)
|
||||
model: modelOptions.model || 'claude-1',
|
||||
temperature: typeof modelOptions.temperature === 'undefined' ? 0.7 : modelOptions.temperature, // 0 - 1, 0.7 is recommended
|
||||
topP: typeof modelOptions.topP === 'undefined' ? 0.7 : modelOptions.topP, // 0 - 1, default: 0.7
|
||||
topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK, // 1-40, default: 40
|
||||
stop: modelOptions.stop, // no stop method for now
|
||||
};
|
||||
|
||||
this.maxContextTokens = this.options.maxContextTokens || 99999;
|
||||
this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500;
|
||||
this.maxPromptTokens =
|
||||
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
|
||||
|
||||
if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
|
||||
throw new Error(
|
||||
`maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
|
||||
this.maxPromptTokens + this.maxResponseTokens
|
||||
}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
|
||||
);
|
||||
}
|
||||
|
||||
this.startToken = '||>';
|
||||
this.endToken = '';
|
||||
this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
|
||||
|
||||
if (!this.modelOptions.stop) {
|
||||
const stopTokens = [this.startToken];
|
||||
if (this.endToken && this.endToken !== this.startToken) {
|
||||
stopTokens.push(this.endToken);
|
||||
}
|
||||
stopTokens.push(`${this.userLabel}`);
|
||||
stopTokens.push('<|diff_marker|>');
|
||||
|
||||
this.modelOptions.stop = stopTokens;
|
||||
}
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
getClient() {
|
||||
if (this.options.reverseProxyUrl) {
|
||||
return new Anthropic({
|
||||
apiKey: this.apiKey,
|
||||
baseURL: this.options.reverseProxyUrl,
|
||||
});
|
||||
} else {
|
||||
return new Anthropic({
|
||||
apiKey: this.apiKey,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async buildMessages(messages, parentMessageId) {
|
||||
const orderedMessages = this.constructor.getMessagesForConversation(messages, parentMessageId);
|
||||
if (this.options.debug) {
|
||||
console.debug('AnthropicClient: orderedMessages', orderedMessages, parentMessageId);
|
||||
}
|
||||
|
||||
const formattedMessages = orderedMessages.map((message) => ({
|
||||
author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
|
||||
content: message?.content ?? message.text,
|
||||
}));
|
||||
|
||||
let identityPrefix = '';
|
||||
if (this.options.userLabel) {
|
||||
identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
|
||||
}
|
||||
|
||||
if (this.options.modelLabel) {
|
||||
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
|
||||
}
|
||||
|
||||
let promptPrefix = (this.options.promptPrefix || '').trim();
|
||||
if (promptPrefix) {
|
||||
// If the prompt prefix doesn't end with the end token, add it.
|
||||
if (!promptPrefix.endsWith(`${this.endToken}`)) {
|
||||
promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
|
||||
}
|
||||
promptPrefix = `\nContext:\n${promptPrefix}`;
|
||||
}
|
||||
|
||||
if (identityPrefix) {
|
||||
promptPrefix = `${identityPrefix}${promptPrefix}`;
|
||||
}
|
||||
|
||||
const promptSuffix = `${promptPrefix}${this.assistantLabel}\n`; // Prompt AI to respond.
|
||||
let currentTokenCount = this.getTokenCount(promptSuffix);
|
||||
|
||||
let promptBody = '';
|
||||
const maxTokenCount = this.maxPromptTokens;
|
||||
|
||||
const context = [];
|
||||
|
||||
// Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
|
||||
// Do this within a recursive async function so that it doesn't block the event loop for too long.
|
||||
// Also, remove the next message when the message that puts us over the token limit is created by the user.
|
||||
// Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:".
|
||||
const nextMessage = {
|
||||
remove: false,
|
||||
tokenCount: 0,
|
||||
messageString: '',
|
||||
};
|
||||
|
||||
const buildPromptBody = async () => {
|
||||
if (currentTokenCount < maxTokenCount && formattedMessages.length > 0) {
|
||||
const message = formattedMessages.pop();
|
||||
const isCreatedByUser = message.author === this.userLabel;
|
||||
const messageString = `${message.author}\n${message.content}${this.endToken}\n`;
|
||||
let newPromptBody = `${messageString}${promptBody}`;
|
||||
|
||||
context.unshift(message);
|
||||
|
||||
const tokenCountForMessage = this.getTokenCount(messageString);
|
||||
const newTokenCount = currentTokenCount + tokenCountForMessage;
|
||||
|
||||
if (!isCreatedByUser) {
|
||||
nextMessage.messageString = messageString;
|
||||
nextMessage.tokenCount = tokenCountForMessage;
|
||||
}
|
||||
|
||||
if (newTokenCount > maxTokenCount) {
|
||||
if (!promptBody) {
|
||||
// This is the first message, so we can't add it. Just throw an error.
|
||||
throw new Error(
|
||||
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
|
||||
);
|
||||
}
|
||||
|
||||
// Otherwise, ths message would put us over the token limit, so don't add it.
|
||||
// if created by user, remove next message, otherwise remove only this message
|
||||
if (isCreatedByUser) {
|
||||
nextMessage.remove = true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
promptBody = newPromptBody;
|
||||
currentTokenCount = newTokenCount;
|
||||
// wait for next tick to avoid blocking the event loop
|
||||
await new Promise((resolve) => setImmediate(resolve));
|
||||
return buildPromptBody();
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
await buildPromptBody();
|
||||
|
||||
if (nextMessage.remove) {
|
||||
promptBody = promptBody.replace(nextMessage.messageString, '');
|
||||
currentTokenCount -= nextMessage.tokenCount;
|
||||
context.shift();
|
||||
}
|
||||
|
||||
const prompt = `${promptBody}${promptSuffix}`;
|
||||
// Add 2 tokens for metadata after all messages have been counted.
|
||||
currentTokenCount += 2;
|
||||
|
||||
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
|
||||
this.modelOptions.maxOutputTokens = Math.min(
|
||||
this.maxContextTokens - currentTokenCount,
|
||||
this.maxResponseTokens,
|
||||
);
|
||||
|
||||
return { prompt, context };
|
||||
}
|
||||
|
||||
getCompletion() {
|
||||
console.log('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
|
||||
}
|
||||
|
||||
// TODO: implement abortController usage
|
||||
async sendCompletion(payload, { onProgress, abortController }) {
|
||||
if (!abortController) {
|
||||
abortController = new AbortController();
|
||||
}
|
||||
|
||||
const { signal } = abortController;
|
||||
|
||||
const modelOptions = { ...this.modelOptions };
|
||||
if (typeof onProgress === 'function') {
|
||||
modelOptions.stream = true;
|
||||
}
|
||||
|
||||
const { debug } = this.options;
|
||||
if (debug) {
|
||||
console.debug();
|
||||
console.debug(modelOptions);
|
||||
console.debug();
|
||||
}
|
||||
|
||||
const client = this.getClient();
|
||||
const metadata = {
|
||||
user_id: this.user,
|
||||
};
|
||||
|
||||
let text = '';
|
||||
const requestOptions = {
|
||||
prompt: payload,
|
||||
model: this.modelOptions.model,
|
||||
stream: this.modelOptions.stream || true,
|
||||
max_tokens_to_sample: this.modelOptions.maxOutputTokens || 1500,
|
||||
metadata,
|
||||
...modelOptions,
|
||||
};
|
||||
if (this.options.debug) {
|
||||
console.log('AnthropicClient: requestOptions');
|
||||
console.dir(requestOptions, { depth: null });
|
||||
}
|
||||
const response = await client.completions.create(requestOptions);
|
||||
|
||||
signal.addEventListener('abort', () => {
|
||||
if (this.options.debug) {
|
||||
console.log('AnthropicClient: message aborted!');
|
||||
}
|
||||
response.controller.abort();
|
||||
});
|
||||
|
||||
for await (const completion of response) {
|
||||
if (this.options.debug) {
|
||||
// Uncomment to debug message stream
|
||||
// console.debug(completion);
|
||||
}
|
||||
text += completion.completion;
|
||||
onProgress(completion.completion);
|
||||
}
|
||||
|
||||
signal.removeEventListener('abort', () => {
|
||||
if (this.options.debug) {
|
||||
console.log('AnthropicClient: message aborted!');
|
||||
}
|
||||
response.controller.abort();
|
||||
});
|
||||
|
||||
return text.trim();
|
||||
}
|
||||
|
||||
// I commented this out because I will need to refactor this for the BaseClient/all clients
|
||||
// getMessageMapMethod() {
|
||||
// return ((message) => ({
|
||||
// author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
|
||||
// content: message?.content ?? message.text
|
||||
// })).bind(this);
|
||||
// }
|
||||
|
||||
getSaveOptions() {
|
||||
return {
|
||||
promptPrefix: this.options.promptPrefix,
|
||||
modelLabel: this.options.modelLabel,
|
||||
...this.modelOptions,
|
||||
};
|
||||
}
|
||||
|
||||
getBuildMessagesOptions() {
|
||||
if (this.options.debug) {
|
||||
console.log('AnthropicClient doesn\'t use getBuildMessagesOptions');
|
||||
}
|
||||
}
|
||||
|
||||
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
|
||||
if (tokenizersCache[encoding]) {
|
||||
return tokenizersCache[encoding];
|
||||
}
|
||||
let tokenizer;
|
||||
if (isModelName) {
|
||||
tokenizer = encodingForModel(encoding, extendSpecialTokens);
|
||||
} else {
|
||||
tokenizer = getEncoding(encoding, extendSpecialTokens);
|
||||
}
|
||||
tokenizersCache[encoding] = tokenizer;
|
||||
return tokenizer;
|
||||
}
|
||||
|
||||
getTokenCount(text) {
|
||||
return this.gptEncoder.encode(text, 'all').length;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = AnthropicClient;
|
||||
@@ -14,7 +14,7 @@ class BaseClient {
|
||||
this.currentDateString = new Date().toLocaleDateString('en-us', {
|
||||
year: 'numeric',
|
||||
month: 'long',
|
||||
day: 'numeric'
|
||||
day: 'numeric',
|
||||
});
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ class BaseClient {
|
||||
const responseMessageId = crypto.randomUUID();
|
||||
const saveOptions = this.getSaveOptions();
|
||||
this.abortController = opts.abortController || new AbortController();
|
||||
this.currentMessages = await this.loadHistory(conversationId, parentMessageId) ?? [];
|
||||
this.currentMessages = (await this.loadHistory(conversationId, parentMessageId)) ?? [];
|
||||
|
||||
return {
|
||||
...opts,
|
||||
@@ -71,27 +71,21 @@ class BaseClient {
|
||||
};
|
||||
}
|
||||
|
||||
createUserMessage({ messageId, parentMessageId, conversationId, text}) {
|
||||
createUserMessage({ messageId, parentMessageId, conversationId, text }) {
|
||||
const userMessage = {
|
||||
messageId,
|
||||
parentMessageId,
|
||||
conversationId,
|
||||
sender: 'User',
|
||||
text,
|
||||
isCreatedByUser: true
|
||||
isCreatedByUser: true,
|
||||
};
|
||||
return userMessage;
|
||||
}
|
||||
|
||||
async handleStartMethods(message, opts) {
|
||||
const {
|
||||
user,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
userMessageId,
|
||||
responseMessageId,
|
||||
saveOptions,
|
||||
} = await this.setMessageOptions(opts);
|
||||
const { user, conversationId, parentMessageId, userMessageId, responseMessageId, saveOptions } =
|
||||
await this.setMessageOptions(opts);
|
||||
|
||||
const userMessage = this.createUserMessage({
|
||||
messageId: userMessageId,
|
||||
@@ -104,7 +98,7 @@ class BaseClient {
|
||||
opts.getIds({
|
||||
userMessage,
|
||||
conversationId,
|
||||
responseMessageId
|
||||
responseMessageId,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -189,24 +183,32 @@ class BaseClient {
|
||||
|
||||
async refineMessages(messagesToRefine, remainingContextTokens) {
|
||||
const model = new ChatOpenAI({ temperature: 0 });
|
||||
const chain = loadSummarizationChain(model, { type: 'refine', verbose: this.options.debug, refinePrompt });
|
||||
const chain = loadSummarizationChain(model, {
|
||||
type: 'refine',
|
||||
verbose: this.options.debug,
|
||||
refinePrompt,
|
||||
});
|
||||
const splitter = new RecursiveCharacterTextSplitter({
|
||||
chunkSize: 1500,
|
||||
chunkOverlap: 100,
|
||||
});
|
||||
const userMessages = this.concatenateMessages(messagesToRefine.filter(m => m.role === 'user'));
|
||||
const assistantMessages = this.concatenateMessages(messagesToRefine.filter(m => m.role !== 'user'));
|
||||
const userDocs = await splitter.createDocuments([userMessages],[],{
|
||||
const userMessages = this.concatenateMessages(
|
||||
messagesToRefine.filter((m) => m.role === 'user'),
|
||||
);
|
||||
const assistantMessages = this.concatenateMessages(
|
||||
messagesToRefine.filter((m) => m.role !== 'user'),
|
||||
);
|
||||
const userDocs = await splitter.createDocuments([userMessages], [], {
|
||||
chunkHeader: 'DOCUMENT NAME: User Message\n\n---\n\n',
|
||||
appendChunkOverlapHeader: true,
|
||||
});
|
||||
const assistantDocs = await splitter.createDocuments([assistantMessages],[],{
|
||||
const assistantDocs = await splitter.createDocuments([assistantMessages], [], {
|
||||
chunkHeader: 'DOCUMENT NAME: Assistant Message\n\n---\n\n',
|
||||
appendChunkOverlapHeader: true,
|
||||
});
|
||||
// const chunkSize = Math.round(concatenatedMessages.length / 512);
|
||||
const input_documents = userDocs.concat(assistantDocs);
|
||||
if (this.options.debug ) {
|
||||
if (this.options.debug) {
|
||||
console.debug('Refining messages...');
|
||||
}
|
||||
try {
|
||||
@@ -219,11 +221,15 @@ class BaseClient {
|
||||
role: 'assistant',
|
||||
content: res.output_text,
|
||||
tokenCount: this.getTokenCount(res.output_text),
|
||||
}
|
||||
};
|
||||
|
||||
if (this.options.debug ) {
|
||||
if (this.options.debug) {
|
||||
console.debug('Refined messages', refinedMessage);
|
||||
console.debug(`remainingContextTokens: ${remainingContextTokens}, after refining: ${remainingContextTokens - refinedMessage.tokenCount}`);
|
||||
console.debug(
|
||||
`remainingContextTokens: ${remainingContextTokens}, after refining: ${
|
||||
remainingContextTokens - refinedMessage.tokenCount
|
||||
}`,
|
||||
);
|
||||
}
|
||||
|
||||
return refinedMessage;
|
||||
@@ -235,15 +241,15 @@ class BaseClient {
|
||||
}
|
||||
|
||||
/**
|
||||
* This method processes an array of messages and returns a context of messages that fit within a token limit.
|
||||
* It iterates over the messages from newest to oldest, adding them to the context until the token limit is reached.
|
||||
* If the token limit would be exceeded by adding a message, that message and possibly the previous one are added to a separate array of messages to refine.
|
||||
* The method uses `push` and `pop` operations for efficient array manipulation, and reverses the arrays at the end to maintain the original order of the messages.
|
||||
* The method also includes a mechanism to avoid blocking the event loop by waiting for the next tick after each iteration.
|
||||
*
|
||||
* @param {Array} messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest.
|
||||
* @returns {Object} An object with three properties: `context`, `remainingContextTokens`, and `messagesToRefine`. `context` is an array of messages that fit within the token limit. `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context. `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit.
|
||||
*/
|
||||
* This method processes an array of messages and returns a context of messages that fit within a token limit.
|
||||
* It iterates over the messages from newest to oldest, adding them to the context until the token limit is reached.
|
||||
* If the token limit would be exceeded by adding a message, that message and possibly the previous one are added to a separate array of messages to refine.
|
||||
* The method uses `push` and `pop` operations for efficient array manipulation, and reverses the arrays at the end to maintain the original order of the messages.
|
||||
* The method also includes a mechanism to avoid blocking the event loop by waiting for the next tick after each iteration.
|
||||
*
|
||||
* @param {Array} messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest.
|
||||
* @returns {Object} An object with three properties: `context`, `remainingContextTokens`, and `messagesToRefine`. `context` is an array of messages that fit within the token limit. `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context. `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit.
|
||||
*/
|
||||
async getMessagesWithinTokenLimit(messages) {
|
||||
let currentTokenCount = 0;
|
||||
let context = [];
|
||||
@@ -282,26 +288,22 @@ class BaseClient {
|
||||
context.push(message);
|
||||
currentTokenCount = newTokenCount;
|
||||
remainingContextTokens = this.maxContextTokens - currentTokenCount;
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
await new Promise((resolve) => setImmediate(resolve));
|
||||
}
|
||||
|
||||
return {
|
||||
context: context.reverse(),
|
||||
remainingContextTokens,
|
||||
messagesToRefine: messagesToRefine.reverse(),
|
||||
refineIndex
|
||||
refineIndex,
|
||||
};
|
||||
}
|
||||
|
||||
async handleContextStrategy({instructions, orderedMessages, formattedMessages}) {
|
||||
async handleContextStrategy({ instructions, orderedMessages, formattedMessages }) {
|
||||
let payload = this.addInstructions(formattedMessages, instructions);
|
||||
let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);
|
||||
let {
|
||||
context,
|
||||
remainingContextTokens,
|
||||
messagesToRefine,
|
||||
refineIndex
|
||||
} = await this.getMessagesWithinTokenLimit(payload);
|
||||
let { context, remainingContextTokens, messagesToRefine, refineIndex } =
|
||||
await this.getMessagesWithinTokenLimit(payload);
|
||||
|
||||
payload = context;
|
||||
let refinedMessage;
|
||||
@@ -325,8 +327,14 @@ class BaseClient {
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('<---------------------------------DIFF--------------------------------->');
|
||||
console.debug(`Difference between payload (${payload.length}) and orderedWithInstructions (${orderedWithInstructions.length}): ${diff}`);
|
||||
console.debug('remainingContextTokens, this.maxContextTokens (1/2)', remainingContextTokens, this.maxContextTokens);
|
||||
console.debug(
|
||||
`Difference between payload (${payload.length}) and orderedWithInstructions (${orderedWithInstructions.length}): ${diff}`,
|
||||
);
|
||||
console.debug(
|
||||
'remainingContextTokens, this.maxContextTokens (1/2)',
|
||||
remainingContextTokens,
|
||||
this.maxContextTokens,
|
||||
);
|
||||
}
|
||||
|
||||
// If the difference is positive, slice the orderedWithInstructions array
|
||||
@@ -341,7 +349,11 @@ class BaseClient {
|
||||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('remainingContextTokens, this.maxContextTokens (2/2)', remainingContextTokens, this.maxContextTokens);
|
||||
console.debug(
|
||||
'remainingContextTokens, this.maxContextTokens (2/2)',
|
||||
remainingContextTokens,
|
||||
this.maxContextTokens,
|
||||
);
|
||||
}
|
||||
|
||||
let tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
|
||||
@@ -350,7 +362,7 @@ class BaseClient {
|
||||
}
|
||||
|
||||
if (index === refineIndex) {
|
||||
map.refined = { ...refinedMessage, messageId: message.messageId};
|
||||
map.refined = { ...refinedMessage, messageId: message.messageId };
|
||||
}
|
||||
|
||||
map[message.messageId] = payload[index].tokenCount;
|
||||
@@ -370,20 +382,19 @@ class BaseClient {
|
||||
}
|
||||
|
||||
async sendMessage(message, opts = {}) {
|
||||
console.log('BaseClient: sendMessage', message, opts);
|
||||
const {
|
||||
user,
|
||||
conversationId,
|
||||
responseMessageId,
|
||||
saveOptions,
|
||||
userMessage,
|
||||
} = await this.handleStartMethods(message, opts);
|
||||
const { user, conversationId, responseMessageId, saveOptions, userMessage } =
|
||||
await this.handleStartMethods(message, opts);
|
||||
|
||||
this.user = user;
|
||||
// It's not necessary to push to currentMessages
|
||||
// depending on subclass implementation of handling messages
|
||||
this.currentMessages.push(userMessage);
|
||||
|
||||
let { prompt: payload, tokenCountMap, promptTokens } = await this.buildMessages(
|
||||
let {
|
||||
prompt: payload,
|
||||
tokenCountMap,
|
||||
promptTokens,
|
||||
} = await this.buildMessages(
|
||||
this.currentMessages,
|
||||
// When the userMessage is pushed to currentMessages, the parentMessage is the userMessageId.
|
||||
// this only matters when buildMessages is utilizing the parentMessageId, and may vary on implementation
|
||||
@@ -397,7 +408,7 @@ class BaseClient {
|
||||
}
|
||||
|
||||
if (tokenCountMap) {
|
||||
console.dir(tokenCountMap, { depth: null })
|
||||
console.dir(tokenCountMap, { depth: null });
|
||||
if (tokenCountMap[userMessage.messageId]) {
|
||||
userMessage.tokenCount = tokenCountMap[userMessage.messageId];
|
||||
console.log('userMessage.tokenCount', userMessage.tokenCount);
|
||||
@@ -457,11 +468,11 @@ class BaseClient {
|
||||
}
|
||||
|
||||
async saveMessageToDatabase(message, endpointOptions, user = null) {
|
||||
await saveMessage({ ...message, unfinished: false });
|
||||
await saveMessage({ ...message, unfinished: false, cancelled: false });
|
||||
await saveConvo(user, {
|
||||
conversationId: message.conversationId,
|
||||
endpoint: this.options.endpoint,
|
||||
...endpointOptions
|
||||
...endpointOptions,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -470,12 +481,12 @@ class BaseClient {
|
||||
}
|
||||
|
||||
/**
|
||||
* Iterate through messages, building an array based on the parentMessageId.
|
||||
* Each message has an id and a parentMessageId. The parentMessageId is the id of the message that this message is a reply to.
|
||||
* @param messages
|
||||
* @param parentMessageId
|
||||
* @returns {*[]} An array containing the messages in the order they should be displayed, starting with the root message.
|
||||
*/
|
||||
* Iterate through messages, building an array based on the parentMessageId.
|
||||
* Each message has an id and a parentMessageId. The parentMessageId is the id of the message that this message is a reply to.
|
||||
* @param messages
|
||||
* @param parentMessageId
|
||||
* @returns {*[]} An array containing the messages in the order they should be displayed, starting with the root message.
|
||||
*/
|
||||
static getMessagesForConversation(messages, parentMessageId, mapMethod = null) {
|
||||
if (!messages || messages.length === 0) {
|
||||
return [];
|
||||
@@ -484,7 +495,7 @@ class BaseClient {
|
||||
const orderedMessages = [];
|
||||
let currentMessageId = parentMessageId;
|
||||
while (currentMessageId) {
|
||||
const message = messages.find(msg => {
|
||||
const message = messages.find((msg) => {
|
||||
const messageId = msg.messageId ?? msg.id;
|
||||
return messageId === currentMessageId;
|
||||
});
|
||||
@@ -503,13 +514,13 @@ class BaseClient {
|
||||
}
|
||||
|
||||
/**
|
||||
* Algorithm adapted from "6. Counting tokens for chat API calls" of
|
||||
* https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
|
||||
*
|
||||
* An additional 2 tokens need to be added for metadata after all messages have been counted.
|
||||
*
|
||||
* @param {*} message
|
||||
*/
|
||||
* Algorithm adapted from "6. Counting tokens for chat API calls" of
|
||||
* https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
|
||||
*
|
||||
* An additional 2 tokens need to be added for metadata after all messages have been counted.
|
||||
*
|
||||
* @param {*} message
|
||||
*/
|
||||
getTokenCountForMessage(message) {
|
||||
let tokensPerMessage;
|
||||
let nameAdjustment;
|
||||
@@ -534,7 +545,7 @@ class BaseClient {
|
||||
const numTokens = this.getTokenCount(value);
|
||||
|
||||
// Adjust by `nameAdjustment` tokens if the property key is 'name'
|
||||
const adjustment = (key === 'name') ? nameAdjustment : 0;
|
||||
const adjustment = key === 'name' ? nameAdjustment : 0;
|
||||
return numTokens + adjustment;
|
||||
});
|
||||
|
||||
@@ -547,4 +558,4 @@ class BaseClient {
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = BaseClient;
|
||||
module.exports = BaseClient;
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
const crypto = require('crypto');
|
||||
const Keyv = require('keyv');
|
||||
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('@dqbd/tiktoken');
|
||||
const {
|
||||
encoding_for_model: encodingForModel,
|
||||
get_encoding: getEncoding,
|
||||
} = require('@dqbd/tiktoken');
|
||||
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
|
||||
const { Agent, ProxyAgent } = require('undici');
|
||||
const BaseClient = require('./BaseClient');
|
||||
@@ -9,11 +12,7 @@ const CHATGPT_MODEL = 'gpt-3.5-turbo';
|
||||
const tokenizersCache = {};
|
||||
|
||||
class ChatGPTClient extends BaseClient {
|
||||
constructor(
|
||||
apiKey,
|
||||
options = {},
|
||||
cacheOptions = {},
|
||||
) {
|
||||
constructor(apiKey, options = {}, cacheOptions = {}) {
|
||||
super(apiKey, options, cacheOptions);
|
||||
|
||||
cacheOptions.namespace = cacheOptions.namespace || 'chatgpt';
|
||||
@@ -49,13 +48,16 @@ class ChatGPTClient extends BaseClient {
|
||||
model: modelOptions.model || CHATGPT_MODEL,
|
||||
temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
|
||||
top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
|
||||
presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
|
||||
presence_penalty:
|
||||
typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
|
||||
stop: modelOptions.stop,
|
||||
};
|
||||
|
||||
this.isChatGptModel = this.modelOptions.model.startsWith('gpt-');
|
||||
const { isChatGptModel } = this;
|
||||
this.isUnofficialChatGptModel = this.modelOptions.model.startsWith('text-chat') || this.modelOptions.model.startsWith('text-davinci-002-render');
|
||||
this.isUnofficialChatGptModel =
|
||||
this.modelOptions.model.startsWith('text-chat') ||
|
||||
this.modelOptions.model.startsWith('text-davinci-002-render');
|
||||
const { isUnofficialChatGptModel } = this;
|
||||
|
||||
// Davinci models have a max context length of 4097 tokens.
|
||||
@@ -64,10 +66,15 @@ class ChatGPTClient extends BaseClient {
|
||||
// The max prompt tokens is determined by the max context tokens minus the max response tokens.
|
||||
// Earlier messages will be dropped until the prompt is within the limit.
|
||||
this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
|
||||
this.maxPromptTokens = this.options.maxPromptTokens || (this.maxContextTokens - this.maxResponseTokens);
|
||||
this.maxPromptTokens =
|
||||
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
|
||||
|
||||
if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
|
||||
throw new Error(`maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${this.maxPromptTokens + this.maxResponseTokens}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`);
|
||||
throw new Error(
|
||||
`maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
|
||||
this.maxPromptTokens + this.maxResponseTokens
|
||||
}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
|
||||
);
|
||||
}
|
||||
|
||||
this.userLabel = this.options.userLabel || 'User';
|
||||
@@ -249,13 +256,10 @@ class ChatGPTClient extends BaseClient {
|
||||
}
|
||||
});
|
||||
}
|
||||
const response = await fetch(
|
||||
url,
|
||||
{
|
||||
...opts,
|
||||
signal: abortController.signal,
|
||||
},
|
||||
);
|
||||
const response = await fetch(url, {
|
||||
...opts,
|
||||
signal: abortController.signal,
|
||||
});
|
||||
if (response.status !== 200) {
|
||||
const body = await response.text();
|
||||
const error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
|
||||
@@ -299,10 +303,7 @@ ${botMessage.message}
|
||||
.trim();
|
||||
}
|
||||
|
||||
async sendMessage(
|
||||
message,
|
||||
opts = {},
|
||||
) {
|
||||
async sendMessage(message, opts = {}) {
|
||||
if (opts.clientOptions && typeof opts.clientOptions === 'object') {
|
||||
this.setOptions(opts.clientOptions);
|
||||
}
|
||||
@@ -310,9 +311,10 @@ ${botMessage.message}
|
||||
const conversationId = opts.conversationId || crypto.randomUUID();
|
||||
const parentMessageId = opts.parentMessageId || crypto.randomUUID();
|
||||
|
||||
let conversation = typeof opts.conversation === 'object'
|
||||
? opts.conversation
|
||||
: await this.conversationsCache.get(conversationId);
|
||||
let conversation =
|
||||
typeof opts.conversation === 'object'
|
||||
? opts.conversation
|
||||
: await this.conversationsCache.get(conversationId);
|
||||
|
||||
let isNewConversation = false;
|
||||
if (!conversation) {
|
||||
@@ -357,7 +359,9 @@ ${botMessage.message}
|
||||
if (progressMessage === '[DONE]') {
|
||||
return;
|
||||
}
|
||||
const token = this.isChatGptModel ? progressMessage.choices[0].delta.content : progressMessage.choices[0].text;
|
||||
const token = this.isChatGptModel
|
||||
? progressMessage.choices[0].delta.content
|
||||
: progressMessage.choices[0].text;
|
||||
// first event's delta content is always undefined
|
||||
if (!token) {
|
||||
return;
|
||||
@@ -437,10 +441,11 @@ ${botMessage.message}
|
||||
}
|
||||
promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
|
||||
} else {
|
||||
const currentDateString = new Date().toLocaleDateString(
|
||||
'en-us',
|
||||
{ year: 'numeric', month: 'long', day: 'numeric' },
|
||||
);
|
||||
const currentDateString = new Date().toLocaleDateString('en-us', {
|
||||
year: 'numeric',
|
||||
month: 'long',
|
||||
day: 'numeric',
|
||||
});
|
||||
promptPrefix = `${this.startToken}Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date: ${currentDateString}${this.endToken}\n\n`;
|
||||
}
|
||||
|
||||
@@ -459,7 +464,9 @@ ${botMessage.message}
|
||||
|
||||
let currentTokenCount;
|
||||
if (isChatGptModel) {
|
||||
currentTokenCount = this.getTokenCountForMessage(instructionsPayload) + this.getTokenCountForMessage(messagePayload);
|
||||
currentTokenCount =
|
||||
this.getTokenCountForMessage(instructionsPayload) +
|
||||
this.getTokenCountForMessage(messagePayload);
|
||||
} else {
|
||||
currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
|
||||
}
|
||||
@@ -473,8 +480,13 @@ ${botMessage.message}
|
||||
const buildPromptBody = async () => {
|
||||
if (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
|
||||
const message = orderedMessages.pop();
|
||||
const roleLabel = message?.isCreatedByUser || message?.role?.toLowerCase() === 'user' ? this.userLabel : this.chatGptLabel;
|
||||
const messageString = `${this.startToken}${roleLabel}:\n${message?.text ?? message?.message}${this.endToken}\n`;
|
||||
const roleLabel =
|
||||
message?.isCreatedByUser || message?.role?.toLowerCase() === 'user'
|
||||
? this.userLabel
|
||||
: this.chatGptLabel;
|
||||
const messageString = `${this.startToken}${roleLabel}:\n${
|
||||
message?.text ?? message?.message
|
||||
}${this.endToken}\n`;
|
||||
let newPromptBody;
|
||||
if (promptBody || isChatGptModel) {
|
||||
newPromptBody = `${messageString}${promptBody}`;
|
||||
@@ -496,12 +508,14 @@ ${botMessage.message}
|
||||
return false;
|
||||
}
|
||||
// This is the first message, so we can't add it. Just throw an error.
|
||||
throw new Error(`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`);
|
||||
throw new Error(
|
||||
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
|
||||
);
|
||||
}
|
||||
promptBody = newPromptBody;
|
||||
currentTokenCount = newTokenCount;
|
||||
// wait for next tick to avoid blocking the event loop
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
await new Promise((resolve) => setImmediate(resolve));
|
||||
return buildPromptBody();
|
||||
}
|
||||
return true;
|
||||
@@ -517,7 +531,10 @@ ${botMessage.message}
|
||||
}
|
||||
|
||||
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
|
||||
this.modelOptions.max_tokens = Math.min(this.maxContextTokens - currentTokenCount, this.maxResponseTokens);
|
||||
this.modelOptions.max_tokens = Math.min(
|
||||
this.maxContextTokens - currentTokenCount,
|
||||
this.maxResponseTokens,
|
||||
);
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug(`Prompt : ${prompt}`);
|
||||
@@ -534,13 +551,13 @@ ${botMessage.message}
|
||||
}
|
||||
|
||||
/**
|
||||
* Algorithm adapted from "6. Counting tokens for chat API calls" of
|
||||
* https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
|
||||
*
|
||||
* An additional 2 tokens need to be added for metadata after all messages have been counted.
|
||||
*
|
||||
* @param {*} message
|
||||
*/
|
||||
* Algorithm adapted from "6. Counting tokens for chat API calls" of
|
||||
* https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
|
||||
*
|
||||
* An additional 2 tokens need to be added for metadata after all messages have been counted.
|
||||
*
|
||||
* @param {*} message
|
||||
*/
|
||||
getTokenCountForMessage(message) {
|
||||
let tokensPerMessage;
|
||||
let nameAdjustment;
|
||||
@@ -558,7 +575,7 @@ ${botMessage.message}
|
||||
const numTokens = this.getTokenCount(value);
|
||||
|
||||
// Adjust by `nameAdjustment` tokens if the property key is 'name'
|
||||
const adjustment = (key === 'name') ? nameAdjustment : 0;
|
||||
const adjustment = key === 'name' ? nameAdjustment : 0;
|
||||
return numTokens + adjustment;
|
||||
});
|
||||
|
||||
@@ -567,4 +584,4 @@ ${botMessage.message}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = ChatGPTClient;
|
||||
module.exports = ChatGPTClient;
|
||||
|
||||
@@ -3,7 +3,7 @@ const { google } = require('googleapis');
|
||||
const { Agent, ProxyAgent } = require('undici');
|
||||
const {
|
||||
encoding_for_model: encodingForModel,
|
||||
get_encoding: getEncoding
|
||||
get_encoding: getEncoding,
|
||||
} = require('@dqbd/tiktoken');
|
||||
|
||||
const tokenizersCache = {};
|
||||
@@ -43,20 +43,20 @@ class GoogleClient extends BaseClient {
|
||||
// nested options aren't spread properly, so we need to do this manually
|
||||
this.options.modelOptions = {
|
||||
...this.options.modelOptions,
|
||||
...options.modelOptions
|
||||
...options.modelOptions,
|
||||
};
|
||||
delete options.modelOptions;
|
||||
// now we can merge options
|
||||
this.options = {
|
||||
...this.options,
|
||||
...options
|
||||
...options,
|
||||
};
|
||||
} else {
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
this.options.examples = this.options.examples.filter(
|
||||
(obj) => obj.input.content !== '' && obj.output.content !== ''
|
||||
(obj) => obj.input.content !== '' && obj.output.content !== '',
|
||||
);
|
||||
|
||||
const modelOptions = this.options.modelOptions || {};
|
||||
@@ -66,7 +66,7 @@ class GoogleClient extends BaseClient {
|
||||
model: modelOptions.model || 'chat-bison',
|
||||
temperature: typeof modelOptions.temperature === 'undefined' ? 0.2 : modelOptions.temperature, // 0 - 1, 0.2 is recommended
|
||||
topP: typeof modelOptions.topP === 'undefined' ? 0.95 : modelOptions.topP, // 0 - 1, default: 0.95
|
||||
topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK // 1-40, default: 40
|
||||
topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK, // 1-40, default: 40
|
||||
// stop: modelOptions.stop // no stop method for now
|
||||
};
|
||||
|
||||
@@ -86,7 +86,7 @@ class GoogleClient extends BaseClient {
|
||||
throw new Error(
|
||||
`maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
|
||||
this.maxPromptTokens + this.maxResponseTokens
|
||||
}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`
|
||||
}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -105,7 +105,7 @@ class GoogleClient extends BaseClient {
|
||||
this.endToken = '<|im_end|>';
|
||||
this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, {
|
||||
'<|im_start|>': 100264,
|
||||
'<|im_end|>': 100265
|
||||
'<|im_end|>': 100265,
|
||||
});
|
||||
} else {
|
||||
// Previously I was trying to use "<|endoftext|>" but there seems to be some bug with OpenAI's token counting
|
||||
@@ -143,7 +143,7 @@ class GoogleClient extends BaseClient {
|
||||
getMessageMapMethod() {
|
||||
return ((message) => ({
|
||||
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
|
||||
content: message?.content ?? message.text
|
||||
content: message?.content ?? message.text,
|
||||
})).bind(this);
|
||||
}
|
||||
|
||||
@@ -153,9 +153,9 @@ class GoogleClient extends BaseClient {
|
||||
instances: [
|
||||
{
|
||||
messages: formattedMessages,
|
||||
}
|
||||
},
|
||||
],
|
||||
parameters: this.options.modelOptions
|
||||
parameters: this.options.modelOptions,
|
||||
};
|
||||
|
||||
if (this.options.promptPrefix) {
|
||||
@@ -170,8 +170,8 @@ class GoogleClient extends BaseClient {
|
||||
if (this.isTextModel) {
|
||||
payload.instances = [
|
||||
{
|
||||
prompt: messages[messages.length -1].content
|
||||
}
|
||||
prompt: messages[messages.length - 1].content,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
@@ -199,9 +199,9 @@ class GoogleClient extends BaseClient {
|
||||
method: 'POST',
|
||||
agent: new Agent({
|
||||
bodyTimeout: 0,
|
||||
headersTimeout: 0
|
||||
headersTimeout: 0,
|
||||
}),
|
||||
signal: abortController.signal
|
||||
signal: abortController.signal,
|
||||
};
|
||||
|
||||
if (this.options.proxy) {
|
||||
@@ -216,7 +216,9 @@ class GoogleClient extends BaseClient {
|
||||
|
||||
getSaveOptions() {
|
||||
return {
|
||||
...this.modelOptions
|
||||
promptPrefix: this.options.promptPrefix,
|
||||
modelLabel: this.options.modelLabel,
|
||||
...this.modelOptions,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -237,7 +239,7 @@ class GoogleClient extends BaseClient {
|
||||
'';
|
||||
if (blocked === true) {
|
||||
reply = `Google blocked a proper response to your message:\n${JSON.stringify(
|
||||
result.predictions[0].safetyAttributes
|
||||
result.predictions[0].safetyAttributes,
|
||||
)}${reply.length > 0 ? `\nAI Response:\n${reply}` : ''}`;
|
||||
}
|
||||
if (this.options.debug) {
|
||||
|
||||
@@ -1,9 +1,15 @@
|
||||
const BaseClient = require('./BaseClient');
|
||||
const ChatGPTClient = require('./ChatGPTClient');
|
||||
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('@dqbd/tiktoken');
|
||||
const {
|
||||
encoding_for_model: encodingForModel,
|
||||
get_encoding: getEncoding,
|
||||
} = require('@dqbd/tiktoken');
|
||||
const { maxTokensMap, genAzureChatCompletion } = require('../../utils');
|
||||
|
||||
// Cache to store Tiktoken instances
|
||||
const tokenizersCache = {};
|
||||
// Counter for keeping track of the number of tokenizer calls
|
||||
let tokenizerCallsCount = 0;
|
||||
|
||||
class OpenAIClient extends BaseClient {
|
||||
constructor(apiKey, options = {}) {
|
||||
@@ -12,7 +18,9 @@ class OpenAIClient extends BaseClient {
|
||||
this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
|
||||
this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
|
||||
this.sender = options.sender ?? 'ChatGPT';
|
||||
this.contextStrategy = options.contextStrategy ? options.contextStrategy.toLowerCase() : 'discard';
|
||||
this.contextStrategy = options.contextStrategy
|
||||
? options.contextStrategy.toLowerCase()
|
||||
: 'discard';
|
||||
this.shouldRefineContext = this.contextStrategy === 'refine';
|
||||
this.azure = options.azure || false;
|
||||
if (this.azure) {
|
||||
@@ -45,34 +53,45 @@ class OpenAIClient extends BaseClient {
|
||||
this.modelOptions = {
|
||||
...modelOptions,
|
||||
model: modelOptions.model || 'gpt-3.5-turbo',
|
||||
temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
|
||||
temperature:
|
||||
typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
|
||||
top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
|
||||
presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
|
||||
presence_penalty:
|
||||
typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
|
||||
stop: modelOptions.stop,
|
||||
};
|
||||
}
|
||||
|
||||
this.isChatCompletion = this.options.reverseProxyUrl || this.options.localAI || this.modelOptions.model.startsWith('gpt-');
|
||||
this.isChatCompletion =
|
||||
this.options.reverseProxyUrl ||
|
||||
this.options.localAI ||
|
||||
this.modelOptions.model.startsWith('gpt-');
|
||||
this.isChatGptModel = this.isChatCompletion;
|
||||
if (this.modelOptions.model === 'text-davinci-003') {
|
||||
this.isChatCompletion = false;
|
||||
this.isChatGptModel = false;
|
||||
}
|
||||
const { isChatGptModel } = this;
|
||||
this.isUnofficialChatGptModel = this.modelOptions.model.startsWith('text-chat') || this.modelOptions.model.startsWith('text-davinci-002-render');
|
||||
this.isUnofficialChatGptModel =
|
||||
this.modelOptions.model.startsWith('text-chat') ||
|
||||
this.modelOptions.model.startsWith('text-davinci-002-render');
|
||||
this.maxContextTokens = maxTokensMap[this.modelOptions.model] ?? 4095; // 1 less than maximum
|
||||
this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
|
||||
this.maxPromptTokens = this.options.maxPromptTokens || (this.maxContextTokens - this.maxResponseTokens);
|
||||
this.maxPromptTokens =
|
||||
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
|
||||
|
||||
if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
|
||||
throw new Error(`maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${this.maxPromptTokens + this.maxResponseTokens}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`);
|
||||
throw new Error(
|
||||
`maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
|
||||
this.maxPromptTokens + this.maxResponseTokens
|
||||
}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
|
||||
);
|
||||
}
|
||||
|
||||
this.userLabel = this.options.userLabel || 'User';
|
||||
this.chatGptLabel = this.options.chatGptLabel || 'Assistant';
|
||||
|
||||
this.setupTokens();
|
||||
this.setupTokenizer();
|
||||
|
||||
if (!this.modelOptions.stop) {
|
||||
const stopTokens = [this.startToken];
|
||||
@@ -116,68 +135,87 @@ class OpenAIClient extends BaseClient {
|
||||
}
|
||||
}
|
||||
|
||||
setupTokenizer() {
|
||||
// Selects an appropriate tokenizer based on the current configuration of the client instance.
|
||||
// It takes into account factors such as whether it's a chat completion, an unofficial chat GPT model, etc.
|
||||
selectTokenizer() {
|
||||
let tokenizer;
|
||||
this.encoding = 'text-davinci-003';
|
||||
if (this.isChatCompletion) {
|
||||
this.encoding = 'cl100k_base';
|
||||
this.gptEncoder = this.constructor.getTokenizer(this.encoding);
|
||||
tokenizer = this.constructor.getTokenizer(this.encoding);
|
||||
} else if (this.isUnofficialChatGptModel) {
|
||||
this.gptEncoder = this.constructor.getTokenizer(this.encoding, true, {
|
||||
const extendSpecialTokens = {
|
||||
'<|im_start|>': 100264,
|
||||
'<|im_end|>': 100265,
|
||||
});
|
||||
};
|
||||
tokenizer = this.constructor.getTokenizer(this.encoding, true, extendSpecialTokens);
|
||||
} else {
|
||||
try {
|
||||
this.encoding = this.modelOptions.model;
|
||||
this.gptEncoder = this.constructor.getTokenizer(this.modelOptions.model, true);
|
||||
tokenizer = this.constructor.getTokenizer(this.modelOptions.model, true);
|
||||
} catch {
|
||||
this.gptEncoder = this.constructor.getTokenizer(this.encoding, true);
|
||||
tokenizer = this.constructor.getTokenizer(this.encoding, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
|
||||
if (tokenizersCache[encoding]) {
|
||||
return tokenizersCache[encoding];
|
||||
}
|
||||
let tokenizer;
|
||||
if (isModelName) {
|
||||
tokenizer = encodingForModel(encoding, extendSpecialTokens);
|
||||
} else {
|
||||
tokenizer = getEncoding(encoding, extendSpecialTokens);
|
||||
}
|
||||
tokenizersCache[encoding] = tokenizer;
|
||||
return tokenizer;
|
||||
}
|
||||
|
||||
freeAndResetEncoder() {
|
||||
try {
|
||||
if (!this.gptEncoder) {
|
||||
return;
|
||||
// Retrieves a tokenizer either from the cache or creates a new one if one doesn't exist in the cache.
|
||||
// If a tokenizer is being created, it's also added to the cache.
|
||||
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
|
||||
let tokenizer;
|
||||
if (tokenizersCache[encoding]) {
|
||||
tokenizer = tokenizersCache[encoding];
|
||||
} else {
|
||||
if (isModelName) {
|
||||
tokenizer = encodingForModel(encoding, extendSpecialTokens);
|
||||
} else {
|
||||
tokenizer = getEncoding(encoding, extendSpecialTokens);
|
||||
}
|
||||
this.gptEncoder.free();
|
||||
delete tokenizersCache[this.encoding];
|
||||
delete tokenizersCache.count;
|
||||
this.setupTokenizer();
|
||||
tokenizersCache[encoding] = tokenizer;
|
||||
}
|
||||
return tokenizer;
|
||||
}
|
||||
|
||||
// Frees all encoders in the cache and resets the count.
|
||||
static freeAndResetAllEncoders() {
|
||||
try {
|
||||
Object.keys(tokenizersCache).forEach((key) => {
|
||||
if (tokenizersCache[key]) {
|
||||
tokenizersCache[key].free();
|
||||
delete tokenizersCache[key];
|
||||
}
|
||||
});
|
||||
// Reset count
|
||||
tokenizerCallsCount = 1;
|
||||
} catch (error) {
|
||||
console.log('freeAndResetEncoder error');
|
||||
console.log('Free and reset encoders error');
|
||||
console.error(error);
|
||||
}
|
||||
}
|
||||
|
||||
getTokenCount(text) {
|
||||
try {
|
||||
if (tokenizersCache.count >= 25) {
|
||||
if (this.options.debug) {
|
||||
console.debug('freeAndResetEncoder: reached 25 encodings, reseting...');
|
||||
}
|
||||
this.freeAndResetEncoder();
|
||||
// Checks if the cache of tokenizers has reached a certain size. If it has, it frees and resets all tokenizers.
|
||||
resetTokenizersIfNecessary() {
|
||||
if (tokenizerCallsCount >= 25) {
|
||||
if (this.options.debug) {
|
||||
console.debug('freeAndResetAllEncoders: reached 25 encodings, resetting...');
|
||||
}
|
||||
tokenizersCache.count = (tokenizersCache.count || 0) + 1;
|
||||
return this.gptEncoder.encode(text, 'all').length;
|
||||
this.constructor.freeAndResetAllEncoders();
|
||||
}
|
||||
tokenizerCallsCount++;
|
||||
}
|
||||
|
||||
// Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
|
||||
getTokenCount(text) {
|
||||
this.resetTokenizersIfNecessary();
|
||||
try {
|
||||
const tokenizer = this.selectTokenizer();
|
||||
return tokenizer.encode(text, 'all').length;
|
||||
} catch (error) {
|
||||
this.freeAndResetEncoder();
|
||||
return this.gptEncoder.encode(text, 'all').length;
|
||||
this.constructor.freeAndResetAllEncoders();
|
||||
const tokenizer = this.selectTokenizer();
|
||||
return tokenizer.encode(text, 'all').length;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,7 +223,7 @@ class OpenAIClient extends BaseClient {
|
||||
return {
|
||||
chatGptLabel: this.options.chatGptLabel,
|
||||
promptPrefix: this.options.promptPrefix,
|
||||
...this.modelOptions
|
||||
...this.modelOptions,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -197,9 +235,16 @@ class OpenAIClient extends BaseClient {
|
||||
};
|
||||
}
|
||||
|
||||
async buildMessages(messages, parentMessageId, { isChatCompletion = false, promptPrefix = null }) {
|
||||
async buildMessages(
|
||||
messages,
|
||||
parentMessageId,
|
||||
{ isChatCompletion = false, promptPrefix = null },
|
||||
) {
|
||||
if (!isChatCompletion) {
|
||||
return await this.buildPrompt(messages, parentMessageId, { isChatGptModel: isChatCompletion, promptPrefix });
|
||||
return await this.buildPrompt(messages, parentMessageId, {
|
||||
isChatGptModel: isChatCompletion,
|
||||
promptPrefix,
|
||||
});
|
||||
}
|
||||
|
||||
let payload;
|
||||
@@ -214,7 +259,7 @@ class OpenAIClient extends BaseClient {
|
||||
instructions = {
|
||||
role: 'system',
|
||||
name: 'instructions',
|
||||
content: promptPrefix
|
||||
content: promptPrefix,
|
||||
};
|
||||
|
||||
if (this.contextStrategy) {
|
||||
@@ -236,7 +281,8 @@ class OpenAIClient extends BaseClient {
|
||||
}
|
||||
|
||||
if (this.contextStrategy) {
|
||||
formattedMessage.tokenCount = message.tokenCount ?? this.getTokenCountForMessage(formattedMessage);
|
||||
formattedMessage.tokenCount =
|
||||
message.tokenCount ?? this.getTokenCountForMessage(formattedMessage);
|
||||
}
|
||||
|
||||
return formattedMessage;
|
||||
@@ -244,8 +290,11 @@ class OpenAIClient extends BaseClient {
|
||||
|
||||
// TODO: need to handle interleaving instructions better
|
||||
if (this.contextStrategy) {
|
||||
({ payload, tokenCountMap, promptTokens, messages } =
|
||||
await this.handleContextStrategy({ instructions, orderedMessages, formattedMessages }));
|
||||
({ payload, tokenCountMap, promptTokens, messages } = await this.handleContextStrategy({
|
||||
instructions,
|
||||
orderedMessages,
|
||||
formattedMessages,
|
||||
}));
|
||||
}
|
||||
|
||||
const result = {
|
||||
@@ -272,8 +321,9 @@ class OpenAIClient extends BaseClient {
|
||||
if (progressMessage === '[DONE]') {
|
||||
return;
|
||||
}
|
||||
const token =
|
||||
this.isChatCompletion ? progressMessage.choices?.[0]?.delta?.content : progressMessage.choices?.[0]?.text;
|
||||
const token = this.isChatCompletion
|
||||
? progressMessage.choices?.[0]?.delta?.content
|
||||
: progressMessage.choices?.[0]?.text;
|
||||
// first event's delta content is always undefined
|
||||
if (!token) {
|
||||
return;
|
||||
|
||||
@@ -2,14 +2,11 @@ const OpenAIClient = require('./OpenAIClient');
|
||||
const { ChatOpenAI } = require('langchain/chat_models/openai');
|
||||
const { CallbackManager } = require('langchain/callbacks');
|
||||
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents/');
|
||||
const { findMessageContent } = require('../../utils');
|
||||
const { loadTools } = require('./tools/util');
|
||||
const { SelfReflectionTool } = require('./tools/');
|
||||
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
|
||||
const {
|
||||
instructions,
|
||||
imageInstructions,
|
||||
errorInstructions,
|
||||
} = require('./prompts/instructions');
|
||||
const { instructions, imageInstructions, errorInstructions } = require('./prompts/instructions');
|
||||
|
||||
class PluginsClient extends OpenAIClient {
|
||||
constructor(apiKey, options = {}) {
|
||||
@@ -28,11 +25,13 @@ class PluginsClient extends OpenAIClient {
|
||||
|
||||
if (actions[0]?.action && this.functionsAgent) {
|
||||
actions = actions.map((step) => ({
|
||||
log: `Action: ${step.action?.tool || ''}\nInput: ${JSON.stringify(step.action?.toolInput) || ''}\nObservation: ${step.observation}`
|
||||
log: `Action: ${step.action?.tool || ''}\nInput: ${
|
||||
JSON.stringify(step.action?.toolInput) || ''
|
||||
}\nObservation: ${step.observation}`,
|
||||
}));
|
||||
} else if (actions[0]?.action) {
|
||||
actions = actions.map((step) => ({
|
||||
log: `${step.action.log}\nObservation: ${step.observation}`
|
||||
log: `${step.action.log}\nObservation: ${step.observation}`,
|
||||
}));
|
||||
}
|
||||
|
||||
@@ -114,8 +113,8 @@ Only respond with your conversational reply to the following User Message:
|
||||
super.setOptions(options);
|
||||
this.isGpt3 = this.modelOptions.model.startsWith('gpt-3');
|
||||
|
||||
if (this.reverseProxyUrl) {
|
||||
this.langchainProxy = this.reverseProxyUrl.match(/.*v1/)[0];
|
||||
if (this.options.reverseProxyUrl) {
|
||||
this.langchainProxy = this.options.reverseProxyUrl.match(/.*v1/)[0];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,14 +132,13 @@ Only respond with your conversational reply to the following User Message:
|
||||
}
|
||||
|
||||
getFunctionModelName(input) {
|
||||
const prefixMap = {
|
||||
'gpt-4': 'gpt-4-0613',
|
||||
'gpt-4-32k': 'gpt-4-32k-0613',
|
||||
'gpt-3.5-turbo': 'gpt-3.5-turbo-0613'
|
||||
};
|
||||
|
||||
const prefix = Object.keys(prefixMap).find(key => input.startsWith(key));
|
||||
return prefix ? prefixMap[prefix] : 'gpt-3.5-turbo-0613';
|
||||
if (input.startsWith('gpt-3.5-turbo')) {
|
||||
return 'gpt-3.5-turbo';
|
||||
} else if (input.startsWith('gpt-4')) {
|
||||
return 'gpt-4';
|
||||
} else {
|
||||
return 'gpt-3.5-turbo';
|
||||
}
|
||||
}
|
||||
|
||||
getBuildMessagesOptions(opts) {
|
||||
@@ -173,7 +171,7 @@ Only respond with your conversational reply to the following User Message:
|
||||
async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
|
||||
const modelOptions = {
|
||||
modelName: this.agentOptions.model,
|
||||
temperature: this.agentOptions.temperature
|
||||
temperature: this.agentOptions.temperature,
|
||||
};
|
||||
|
||||
const configOptions = {};
|
||||
@@ -185,7 +183,9 @@ Only respond with your conversational reply to the following User Message:
|
||||
const model = this.createLLM(modelOptions, configOptions);
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug(`<-----Agent Model: ${model.modelName} | Temp: ${model.temperature}----->`);
|
||||
console.debug(
|
||||
`<-----Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}----->`,
|
||||
);
|
||||
}
|
||||
|
||||
this.availableTools = await loadTools({
|
||||
@@ -194,8 +194,10 @@ Only respond with your conversational reply to the following User Message:
|
||||
tools: this.options.tools,
|
||||
functions: this.functionsAgent,
|
||||
options: {
|
||||
openAIApiKey: this.openAIApiKey
|
||||
}
|
||||
openAIApiKey: this.openAIApiKey,
|
||||
debug: this.options?.debug,
|
||||
message,
|
||||
},
|
||||
});
|
||||
// load tools
|
||||
for (const tool of this.options.tools) {
|
||||
@@ -235,10 +237,13 @@ Only respond with your conversational reply to the following User Message:
|
||||
};
|
||||
|
||||
// Map Messages to Langchain format
|
||||
const pastMessages = this.currentMessages.slice(0, -1).map(
|
||||
msg => msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
|
||||
? new HumanChatMessage(msg.text)
|
||||
: new AIChatMessage(msg.text));
|
||||
const pastMessages = this.currentMessages
|
||||
.slice(0, -1)
|
||||
.map((msg) =>
|
||||
msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
|
||||
? new HumanChatMessage(msg.text)
|
||||
: new AIChatMessage(msg.text),
|
||||
);
|
||||
|
||||
// initialize agent
|
||||
const initializer = this.functionsAgent ? initializeFunctionsAgent : initializeCustomAgent;
|
||||
@@ -258,13 +263,22 @@ Only respond with your conversational reply to the following User Message:
|
||||
if (typeof onChainEnd === 'function') {
|
||||
onChainEnd(action);
|
||||
}
|
||||
}
|
||||
})
|
||||
},
|
||||
}),
|
||||
});
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('Loaded agent.');
|
||||
}
|
||||
|
||||
onAgentAction(
|
||||
{
|
||||
tool: 'self-reflection',
|
||||
toolInput: `Processing the User's message:\n"${message}"`,
|
||||
log: '',
|
||||
},
|
||||
true,
|
||||
);
|
||||
}
|
||||
|
||||
async executorCall(message, signal) {
|
||||
@@ -289,6 +303,11 @@ Only respond with your conversational reply to the following User Message:
|
||||
} catch (err) {
|
||||
console.error(err);
|
||||
errorMessage = err.message;
|
||||
const content = findMessageContent(message);
|
||||
if (content) {
|
||||
errorMessage = content;
|
||||
break;
|
||||
}
|
||||
if (attempts === maxAttempts) {
|
||||
this.result.output = `Encountered an error while attempting to respond. Error: ${err.message}`;
|
||||
this.result.intermediateSteps = this.actions;
|
||||
@@ -304,13 +323,18 @@ Only respond with your conversational reply to the following User Message:
|
||||
return;
|
||||
}
|
||||
|
||||
intermediateSteps.forEach(step => {
|
||||
intermediateSteps.forEach((step) => {
|
||||
const { observation } = step;
|
||||
if (!observation || !observation.includes('![')) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!responseMessage.text.includes(observation)) {
|
||||
// Extract the image file path from the observation
|
||||
const observedImagePath = observation.match(/\(\/images\/.*\.\w*\)/g)[0];
|
||||
|
||||
// Check if the responseMessage already includes the image file path
|
||||
if (!responseMessage.text.includes(observedImagePath)) {
|
||||
// If the image file path is not found, append the whole observation
|
||||
responseMessage.text += '\n' + observation;
|
||||
if (this.options.debug) {
|
||||
console.debug('added image from intermediateSteps');
|
||||
@@ -346,7 +370,12 @@ Only respond with your conversational reply to the following User Message:
|
||||
|
||||
this.currentMessages.push(userMessage);
|
||||
|
||||
let { prompt: payload, tokenCountMap, promptTokens, messages } = await this.buildMessages(
|
||||
let {
|
||||
prompt: payload,
|
||||
tokenCountMap,
|
||||
promptTokens,
|
||||
messages,
|
||||
} = await this.buildMessages(
|
||||
this.currentMessages,
|
||||
userMessage.messageId,
|
||||
this.getBuildMessagesOptions({
|
||||
@@ -356,7 +385,7 @@ Only respond with your conversational reply to the following User Message:
|
||||
);
|
||||
|
||||
if (tokenCountMap) {
|
||||
console.dir(tokenCountMap, { depth: null })
|
||||
console.dir(tokenCountMap, { depth: null });
|
||||
if (tokenCountMap[userMessage.messageId]) {
|
||||
userMessage.tokenCount = tokenCountMap[userMessage.messageId];
|
||||
console.log('userMessage.tokenCount', userMessage.tokenCount);
|
||||
@@ -389,7 +418,7 @@ Only respond with your conversational reply to the following User Message:
|
||||
message,
|
||||
onAgentAction,
|
||||
onChainEnd,
|
||||
signal: this.abortController.signal
|
||||
signal: this.abortController.signal,
|
||||
});
|
||||
await this.executorCall(message, this.abortController.signal);
|
||||
|
||||
@@ -402,7 +431,7 @@ Only respond with your conversational reply to the following User Message:
|
||||
if (this.agentOptions.skipCompletion && this.result.output) {
|
||||
responseMessage.text = this.result.output;
|
||||
this.addImages(this.result.intermediateSteps, responseMessage);
|
||||
await this.generateTextStream(this.result.output, opts.onProgress);
|
||||
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 8 });
|
||||
return await this.handleResponseMessage(responseMessage, saveOptions, user);
|
||||
}
|
||||
|
||||
@@ -448,12 +477,12 @@ Only respond with your conversational reply to the following User Message:
|
||||
const instructionsPayload = {
|
||||
role: 'system',
|
||||
name: 'instructions',
|
||||
content: promptPrefix
|
||||
content: promptPrefix,
|
||||
};
|
||||
|
||||
const messagePayload = {
|
||||
role: 'system',
|
||||
content: promptSuffix
|
||||
content: promptSuffix,
|
||||
};
|
||||
|
||||
if (this.isGpt3) {
|
||||
@@ -463,13 +492,13 @@ Only respond with your conversational reply to the following User Message:
|
||||
}
|
||||
|
||||
// testing if this works with browser endpoint
|
||||
if (!this.isGpt3 && this.reverseProxyUrl) {
|
||||
if (!this.isGpt3 && this.options.reverseProxyUrl) {
|
||||
instructionsPayload.role = 'user';
|
||||
}
|
||||
|
||||
let currentTokenCount =
|
||||
this.getTokenCountForMessage(instructionsPayload) +
|
||||
this.getTokenCountForMessage(messagePayload);
|
||||
this.getTokenCountForMessage(instructionsPayload) +
|
||||
this.getTokenCountForMessage(messagePayload);
|
||||
|
||||
let promptBody = '';
|
||||
const maxTokenCount = this.maxPromptTokens;
|
||||
@@ -492,7 +521,7 @@ Only respond with your conversational reply to the following User Message:
|
||||
}
|
||||
// This is the first message, so we can't add it. Just throw an error.
|
||||
throw new Error(
|
||||
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`
|
||||
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
|
||||
);
|
||||
}
|
||||
promptBody = newPromptBody;
|
||||
@@ -519,7 +548,7 @@ Only respond with your conversational reply to the following User Message:
|
||||
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
|
||||
this.modelOptions.max_tokens = Math.min(
|
||||
this.maxContextTokens - currentTokenCount,
|
||||
this.maxResponseTokens
|
||||
this.maxResponseTokens,
|
||||
);
|
||||
|
||||
if (this.isGpt3) {
|
||||
|
||||
@@ -8,7 +8,7 @@ class CustomAgent extends ZeroShotAgent {
|
||||
}
|
||||
|
||||
_stop() {
|
||||
return [`\nObservation:`, `\nObservation 1:`];
|
||||
return ['\nObservation:', '\nObservation 1:'];
|
||||
}
|
||||
|
||||
static createPrompt(tools, opts = {}) {
|
||||
@@ -32,17 +32,17 @@ class CustomAgent extends ZeroShotAgent {
|
||||
.join('\n');
|
||||
const toolNames = tools.map((tool) => tool.name);
|
||||
const formatInstructions = (0, renderTemplate)(instructions, 'f-string', {
|
||||
tool_names: toolNames
|
||||
tool_names: toolNames,
|
||||
});
|
||||
const template = [
|
||||
`Date: ${currentDateString}\n${prefix}`,
|
||||
toolStrings,
|
||||
formatInstructions,
|
||||
suffix
|
||||
suffix,
|
||||
].join('\n\n');
|
||||
return new PromptTemplate({
|
||||
template,
|
||||
inputVariables
|
||||
inputVariables,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
|
||||
const {
|
||||
ChatPromptTemplate,
|
||||
SystemMessagePromptTemplate,
|
||||
HumanMessagePromptTemplate
|
||||
HumanMessagePromptTemplate,
|
||||
} = require('langchain/prompts');
|
||||
|
||||
const initializeCustomAgent = async ({
|
||||
@@ -22,7 +22,7 @@ const initializeCustomAgent = async ({
|
||||
new SystemMessagePromptTemplate(prompt),
|
||||
HumanMessagePromptTemplate.fromTemplate(`{chat_history}
|
||||
Query: {input}
|
||||
{agent_scratchpad}`)
|
||||
{agent_scratchpad}`),
|
||||
]);
|
||||
|
||||
const outputParser = new CustomOutputParser({ tools });
|
||||
@@ -34,18 +34,18 @@ Query: {input}
|
||||
humanPrefix: 'User',
|
||||
aiPrefix: 'Assistant',
|
||||
inputKey: 'input',
|
||||
outputKey: 'output'
|
||||
outputKey: 'output',
|
||||
});
|
||||
|
||||
const llmChain = new LLMChain({
|
||||
prompt: chatPrompt,
|
||||
llm: model
|
||||
llm: model,
|
||||
});
|
||||
|
||||
const agent = new CustomAgent({
|
||||
llmChain,
|
||||
outputParser,
|
||||
allowedTools: tools.map((tool) => tool.name)
|
||||
allowedTools: tools.map((tool) => tool.name),
|
||||
});
|
||||
|
||||
return AgentExecutor.fromAgentAndTools({ agent, tools, memory, ...rest });
|
||||
|
||||
@@ -57,7 +57,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
const output = text.substring(finalMatch.index + finalMatch[0].length).trim();
|
||||
return {
|
||||
returnValues: { output },
|
||||
log: text
|
||||
log: text,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
if (!match) {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT NO MATCH PARSING ERROR---------------------->\n\n',
|
||||
match
|
||||
match,
|
||||
);
|
||||
const thoughts = text.replace(/[tT]hought:/, '').split('\n');
|
||||
// return {
|
||||
@@ -77,7 +77,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
|
||||
return {
|
||||
returnValues: { output: thoughts[0] },
|
||||
log: thoughts.slice(1).join('\n')
|
||||
log: thoughts.slice(1).join('\n'),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -86,12 +86,12 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
if (match && selectedTool === 'n/a') {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT N/A PARSING ERROR---------------------->\n\n',
|
||||
match
|
||||
match,
|
||||
);
|
||||
return {
|
||||
tool: 'self-reflection',
|
||||
toolInput: match[2]?.trim().replace(/^"+|"+$/g, '') ?? '',
|
||||
log: text
|
||||
log: text,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -99,7 +99,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
if (match && !toolIsValid) {
|
||||
console.log(
|
||||
'\n\n<----------------Tool invalid: Re-assigning Selected Tool---------------->\n\n',
|
||||
match
|
||||
match,
|
||||
);
|
||||
selectedTool = this.getValidTool(selectedTool);
|
||||
}
|
||||
@@ -107,7 +107,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
if (match && !selectedTool) {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT INVALID TOOL PARSING ERROR---------------------->\n\n',
|
||||
match
|
||||
match,
|
||||
);
|
||||
selectedTool = 'self-reflection';
|
||||
}
|
||||
@@ -115,7 +115,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
if (match && !match[2]) {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT NO ACTION INPUT PARSING ERROR---------------------->\n\n',
|
||||
match
|
||||
match,
|
||||
);
|
||||
|
||||
// In case there is no action input, let's double-check if there is an action input in 'text' variable
|
||||
@@ -125,7 +125,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
return {
|
||||
tool: selectedTool,
|
||||
toolInput: actionInputMatch[1].trim(),
|
||||
log: text
|
||||
log: text,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -133,7 +133,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
return {
|
||||
tool: selectedTool,
|
||||
toolInput: thoughtMatch[1].trim(),
|
||||
log: text
|
||||
log: text,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -158,12 +158,12 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
if (action && actionInputMatch) {
|
||||
console.log(
|
||||
'\n\n<------Matched Action Input in Long Parsing Error------>\n\n',
|
||||
actionInputMatch
|
||||
actionInputMatch,
|
||||
);
|
||||
return {
|
||||
tool: action,
|
||||
toolInput: actionInputMatch[1].trim().replaceAll('"', ''),
|
||||
log: text
|
||||
log: text,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -180,7 +180,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
const returnValues = {
|
||||
tool: action,
|
||||
toolInput: input,
|
||||
log: thought || inputText
|
||||
log: thought || inputText,
|
||||
};
|
||||
|
||||
const inputMatch = this.actionValues.exec(returnValues.log); //new
|
||||
@@ -197,7 +197,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
return {
|
||||
tool: 'self-reflection',
|
||||
toolInput: 'Hypothetical actions: \n"' + text + '"\n',
|
||||
log: 'Thought: I need to look at my hypothetical actions and try one'
|
||||
log: 'Thought: I need to look at my hypothetical actions and try one',
|
||||
};
|
||||
}
|
||||
|
||||
@@ -210,7 +210,7 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
return {
|
||||
tool: selectedTool,
|
||||
toolInput: match[2]?.trim()?.replace(/^"+|"+$/g, '') ?? '',
|
||||
log: text
|
||||
log: text,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,9 +5,9 @@ const {
|
||||
ChatPromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
SystemMessagePromptTemplate,
|
||||
HumanMessagePromptTemplate
|
||||
HumanMessagePromptTemplate,
|
||||
} = require('langchain/prompts');
|
||||
const PREFIX = `You are a helpful AI assistant.`;
|
||||
const PREFIX = 'You are a helpful AI assistant.';
|
||||
|
||||
function parseOutput(message) {
|
||||
if (message.additional_kwargs.function_call) {
|
||||
@@ -15,7 +15,7 @@ function parseOutput(message) {
|
||||
return {
|
||||
tool: function_call.name,
|
||||
toolInput: function_call.arguments ? JSON.parse(function_call.arguments) : {},
|
||||
log: message.text
|
||||
log: message.text,
|
||||
};
|
||||
} else {
|
||||
return { returnValues: { output: message.text }, log: message.text };
|
||||
@@ -52,7 +52,7 @@ class FunctionsAgent extends Agent {
|
||||
return ChatPromptTemplate.fromPromptMessages([
|
||||
SystemMessagePromptTemplate.fromTemplate(`Date: ${currentDateString}\n${prefix}`),
|
||||
new MessagesPlaceholder('chat_history'),
|
||||
HumanMessagePromptTemplate.fromTemplate(`Query: {input}`),
|
||||
HumanMessagePromptTemplate.fromTemplate('Query: {input}'),
|
||||
new MessagesPlaceholder('agent_scratchpad'),
|
||||
]);
|
||||
}
|
||||
@@ -63,12 +63,12 @@ class FunctionsAgent extends Agent {
|
||||
const chain = new LLMChain({
|
||||
prompt,
|
||||
llm,
|
||||
callbacks: args?.callbacks
|
||||
callbacks: args?.callbacks,
|
||||
});
|
||||
return new FunctionsAgent({
|
||||
llmChain: chain,
|
||||
allowedTools: tools.map((t) => t.name),
|
||||
tools
|
||||
tools,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -77,10 +77,10 @@ class FunctionsAgent extends Agent {
|
||||
new AIChatMessage('', {
|
||||
function_call: {
|
||||
name: action.tool,
|
||||
arguments: JSON.stringify(action.toolInput)
|
||||
}
|
||||
arguments: JSON.stringify(action.toolInput),
|
||||
},
|
||||
}),
|
||||
new FunctionChatMessage(observation, action.tool)
|
||||
new FunctionChatMessage(observation, action.tool),
|
||||
]);
|
||||
}
|
||||
|
||||
@@ -96,7 +96,7 @@ class FunctionsAgent extends Agent {
|
||||
const llm = this.llmChain.llm;
|
||||
const valuesForPrompt = Object.assign({}, newInputs);
|
||||
const valuesForLLM = {
|
||||
tools: this.tools
|
||||
tools: this.tools,
|
||||
};
|
||||
for (let i = 0; i < this.llmChain.llm.callKeys.length; i++) {
|
||||
const key = this.llmChain.llm.callKeys[i];
|
||||
@@ -110,7 +110,7 @@ class FunctionsAgent extends Agent {
|
||||
const message = await llm.predictMessages(
|
||||
promptValue.toChatMessages(),
|
||||
valuesForLLM,
|
||||
callbackManager
|
||||
callbackManager,
|
||||
);
|
||||
console.log('message', message);
|
||||
return parseOutput(message);
|
||||
|
||||
@@ -8,7 +8,6 @@ const initializeFunctionsAgent = async ({
|
||||
// currentDateString,
|
||||
...rest
|
||||
}) => {
|
||||
|
||||
const memory = new BufferMemory({
|
||||
chatHistory: new ChatMessageHistory(pastMessages),
|
||||
memoryKey: 'chat_history',
|
||||
@@ -19,17 +18,11 @@ const initializeFunctionsAgent = async ({
|
||||
returnMessages: true,
|
||||
});
|
||||
|
||||
return await initializeAgentExecutorWithOptions(
|
||||
tools,
|
||||
model,
|
||||
{
|
||||
agentType: 'openai-functions',
|
||||
memory,
|
||||
...rest,
|
||||
}
|
||||
);
|
||||
|
||||
return await initializeAgentExecutorWithOptions(tools, model, {
|
||||
agentType: 'openai-functions',
|
||||
memory,
|
||||
...rest,
|
||||
});
|
||||
};
|
||||
|
||||
module.exports = initializeFunctionsAgent;
|
||||
|
||||
|
||||
@@ -3,5 +3,5 @@ const initializeFunctionsAgent = require('./Functions/initializeFunctionsAgent')
|
||||
|
||||
module.exports = {
|
||||
initializeCustomAgent,
|
||||
initializeFunctionsAgent
|
||||
};
|
||||
initializeFunctionsAgent,
|
||||
};
|
||||
|
||||
@@ -3,6 +3,7 @@ const OpenAIClient = require('./OpenAIClient');
|
||||
const PluginsClient = require('./PluginsClient');
|
||||
const GoogleClient = require('./GoogleClient');
|
||||
const TextStream = require('./TextStream');
|
||||
const AnthropicClient = require('./AnthropicClient');
|
||||
const toolUtils = require('./tools/util');
|
||||
|
||||
module.exports = {
|
||||
@@ -11,5 +12,6 @@ module.exports = {
|
||||
PluginsClient,
|
||||
GoogleClient,
|
||||
TextStream,
|
||||
...toolUtils
|
||||
};
|
||||
AnthropicClient,
|
||||
...toolUtils,
|
||||
};
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
module.exports = {
|
||||
instructions: `Remember, all your responses MUST be in the format described. Do not respond unless it's in the format described, using the structure of Action, Action Input, etc.`,
|
||||
errorInstructions: `\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn't mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:`,
|
||||
imageInstructions: 'You must include the exact image paths from above, formatted in Markdown syntax: ',
|
||||
completionInstructions: `Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date:`,
|
||||
instructions:
|
||||
'Remember, all your responses MUST be in the format described. Do not respond unless it\'s in the format described, using the structure of Action, Action Input, etc.',
|
||||
errorInstructions:
|
||||
'\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn\'t mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:',
|
||||
imageInstructions:
|
||||
'You must include the exact image paths from above, formatted in Markdown syntax: ',
|
||||
completionInstructions:
|
||||
'Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date:',
|
||||
};
|
||||
|
||||
@@ -16,9 +16,9 @@ REFINED CONVERSATION SUMMARY:`;
|
||||
|
||||
const refinePrompt = new PromptTemplate({
|
||||
template: refinePromptTemplate,
|
||||
inputVariables: ["existing_answer", "text"],
|
||||
inputVariables: ['existing_answer', 'text'],
|
||||
});
|
||||
|
||||
module.exports = {
|
||||
refinePrompt,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -10,7 +10,7 @@ jest.mock('../../../models', () => {
|
||||
getMessages: jest.fn(),
|
||||
saveMessage: jest.fn(),
|
||||
updateMessage: jest.fn(),
|
||||
saveConvo: jest.fn()
|
||||
saveConvo: jest.fn(),
|
||||
};
|
||||
};
|
||||
});
|
||||
@@ -52,7 +52,7 @@ describe('BaseClient', () => {
|
||||
modelOptions: {
|
||||
model: 'gpt-3.5-turbo',
|
||||
temperature: 0,
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
@@ -60,22 +60,14 @@ describe('BaseClient', () => {
|
||||
});
|
||||
|
||||
test('returns the input messages without instructions when addInstructions() is called with empty instructions', () => {
|
||||
const messages = [
|
||||
{ content: 'Hello' },
|
||||
{ content: 'How are you?' },
|
||||
{ content: 'Goodbye' },
|
||||
];
|
||||
const messages = [{ content: 'Hello' }, { content: 'How are you?' }, { content: 'Goodbye' }];
|
||||
const instructions = '';
|
||||
const result = TestClient.addInstructions(messages, instructions);
|
||||
expect(result).toEqual(messages);
|
||||
});
|
||||
|
||||
test('returns the input messages with instructions properly added when addInstructions() is called with non-empty instructions', () => {
|
||||
const messages = [
|
||||
{ content: 'Hello' },
|
||||
{ content: 'How are you?' },
|
||||
{ content: 'Goodbye' },
|
||||
];
|
||||
const messages = [{ content: 'Hello' }, { content: 'How are you?' }, { content: 'Goodbye' }];
|
||||
const instructions = { content: 'Please respond to the question.' };
|
||||
const result = TestClient.addInstructions(messages, instructions);
|
||||
const expected = [
|
||||
@@ -94,20 +86,21 @@ describe('BaseClient', () => {
|
||||
{ name: 'User', content: 'I have a question.' },
|
||||
];
|
||||
const result = TestClient.concatenateMessages(messages);
|
||||
const expected = `User:\nHello\n\nAssistant:\nHow can I help you?\n\nUser:\nI have a question.\n\n`;
|
||||
const expected =
|
||||
'User:\nHello\n\nAssistant:\nHow can I help you?\n\nUser:\nI have a question.\n\n';
|
||||
expect(result).toBe(expected);
|
||||
});
|
||||
|
||||
test('refines messages correctly in refineMessages()', async () => {
|
||||
const messagesToRefine = [
|
||||
{ role: 'user', content: 'Hello', tokenCount: 10 },
|
||||
{ role: 'assistant', content: 'How can I help you?', tokenCount: 20 }
|
||||
{ role: 'assistant', content: 'How can I help you?', tokenCount: 20 },
|
||||
];
|
||||
const remainingContextTokens = 100;
|
||||
const expectedRefinedMessage = {
|
||||
role: 'assistant',
|
||||
content: 'Refined answer',
|
||||
tokenCount: 14 // 'Refined answer'.length
|
||||
tokenCount: 14, // 'Refined answer'.length
|
||||
};
|
||||
|
||||
const result = await TestClient.refineMessages(messagesToRefine, remainingContextTokens);
|
||||
@@ -120,7 +113,7 @@ describe('BaseClient', () => {
|
||||
TestClient.refineMessages = jest.fn().mockResolvedValue({
|
||||
role: 'assistant',
|
||||
content: 'Refined answer',
|
||||
tokenCount: 30
|
||||
tokenCount: 30,
|
||||
});
|
||||
|
||||
const messages = [
|
||||
@@ -148,7 +141,7 @@ describe('BaseClient', () => {
|
||||
TestClient.refineMessages = jest.fn().mockResolvedValue({
|
||||
role: 'assistant',
|
||||
content: 'Refined answer',
|
||||
tokenCount: 4
|
||||
tokenCount: 4,
|
||||
});
|
||||
|
||||
const messages = [
|
||||
@@ -176,28 +169,28 @@ describe('BaseClient', () => {
|
||||
});
|
||||
|
||||
test('handles context strategy correctly in handleContextStrategy()', async () => {
|
||||
TestClient.addInstructions = jest.fn().mockReturnValue([
|
||||
{ content: 'Hello' },
|
||||
{ content: 'How can I help you?' },
|
||||
{ content: 'Please provide more details.' },
|
||||
{ content: 'I can assist you with that.' }
|
||||
]);
|
||||
TestClient.addInstructions = jest
|
||||
.fn()
|
||||
.mockReturnValue([
|
||||
{ content: 'Hello' },
|
||||
{ content: 'How can I help you?' },
|
||||
{ content: 'Please provide more details.' },
|
||||
{ content: 'I can assist you with that.' },
|
||||
]);
|
||||
TestClient.getMessagesWithinTokenLimit = jest.fn().mockReturnValue({
|
||||
context: [
|
||||
{ content: 'How can I help you?' },
|
||||
{ content: 'Please provide more details.' },
|
||||
{ content: 'I can assist you with that.' }
|
||||
{ content: 'I can assist you with that.' },
|
||||
],
|
||||
remainingContextTokens: 80,
|
||||
messagesToRefine: [
|
||||
{ content: 'Hello' },
|
||||
],
|
||||
messagesToRefine: [{ content: 'Hello' }],
|
||||
refineIndex: 3,
|
||||
});
|
||||
TestClient.refineMessages = jest.fn().mockResolvedValue({
|
||||
role: 'assistant',
|
||||
content: 'Refined answer',
|
||||
tokenCount: 30
|
||||
tokenCount: 30,
|
||||
});
|
||||
TestClient.getTokenCountForResponse = jest.fn().mockReturnValue(40);
|
||||
|
||||
@@ -206,24 +199,24 @@ describe('BaseClient', () => {
|
||||
{ content: 'Hello' },
|
||||
{ content: 'How can I help you?' },
|
||||
{ content: 'Please provide more details.' },
|
||||
{ content: 'I can assist you with that.' }
|
||||
{ content: 'I can assist you with that.' },
|
||||
];
|
||||
const formattedMessages = [
|
||||
{ content: 'Hello' },
|
||||
{ content: 'How can I help you?' },
|
||||
{ content: 'Please provide more details.' },
|
||||
{ content: 'I can assist you with that.' }
|
||||
{ content: 'I can assist you with that.' },
|
||||
];
|
||||
const expectedResult = {
|
||||
payload: [
|
||||
{
|
||||
content: 'Refined answer',
|
||||
role: 'assistant',
|
||||
tokenCount: 30
|
||||
tokenCount: 30,
|
||||
},
|
||||
{ content: 'How can I help you?' },
|
||||
{ content: 'Please provide more details.' },
|
||||
{ content: 'I can assist you with that.' }
|
||||
{ content: 'I can assist you with that.' },
|
||||
],
|
||||
promptTokens: expect.any(Number),
|
||||
tokenCountMap: {},
|
||||
@@ -246,7 +239,7 @@ describe('BaseClient', () => {
|
||||
isCreatedByUser: false,
|
||||
messageId: expect.any(String),
|
||||
parentMessageId: expect.any(String),
|
||||
conversationId: expect.any(String)
|
||||
conversationId: expect.any(String),
|
||||
});
|
||||
|
||||
const response = await TestClient.sendMessage(userMessage);
|
||||
@@ -261,7 +254,7 @@ describe('BaseClient', () => {
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
getIds: jest.fn(),
|
||||
onStart: jest.fn()
|
||||
onStart: jest.fn(),
|
||||
};
|
||||
|
||||
const expectedResult = expect.objectContaining({
|
||||
@@ -270,7 +263,7 @@ describe('BaseClient', () => {
|
||||
isCreatedByUser: false,
|
||||
messageId: expect.any(String),
|
||||
parentMessageId: expect.any(String),
|
||||
conversationId: opts.conversationId
|
||||
conversationId: opts.conversationId,
|
||||
});
|
||||
|
||||
const response = await TestClient.sendMessage(userMessage, opts);
|
||||
@@ -300,7 +293,10 @@ describe('BaseClient', () => {
|
||||
test('loadHistory is called with the correct arguments', async () => {
|
||||
const opts = { conversationId: '123', parentMessageId: '456' };
|
||||
await TestClient.sendMessage('Hello, world!', opts);
|
||||
expect(TestClient.loadHistory).toHaveBeenCalledWith(opts.conversationId, opts.parentMessageId);
|
||||
expect(TestClient.loadHistory).toHaveBeenCalledWith(
|
||||
opts.conversationId,
|
||||
opts.parentMessageId,
|
||||
);
|
||||
});
|
||||
|
||||
test('getIds is called with the correct arguments', async () => {
|
||||
@@ -310,7 +306,7 @@ describe('BaseClient', () => {
|
||||
expect(getIds).toHaveBeenCalledWith({
|
||||
userMessage: expect.objectContaining({ text: 'Hello, world!' }),
|
||||
conversationId: response.conversationId,
|
||||
responseMessageId: response.messageId
|
||||
responseMessageId: response.messageId,
|
||||
});
|
||||
});
|
||||
|
||||
@@ -333,10 +329,10 @@ describe('BaseClient', () => {
|
||||
isCreatedByUser: expect.any(Boolean),
|
||||
messageId: expect.any(String),
|
||||
parentMessageId: expect.any(String),
|
||||
conversationId: expect.any(String)
|
||||
conversationId: expect.any(String),
|
||||
}),
|
||||
saveOptions,
|
||||
user
|
||||
user,
|
||||
);
|
||||
});
|
||||
|
||||
@@ -358,14 +354,16 @@ describe('BaseClient', () => {
|
||||
|
||||
test('returns an object with the correct shape', async () => {
|
||||
const response = await TestClient.sendMessage('Hello, world!', {});
|
||||
expect(response).toEqual(expect.objectContaining({
|
||||
sender: expect.any(String),
|
||||
text: expect.any(String),
|
||||
isCreatedByUser: expect.any(Boolean),
|
||||
messageId: expect.any(String),
|
||||
parentMessageId: expect.any(String),
|
||||
conversationId: expect.any(String)
|
||||
}));
|
||||
expect(response).toEqual(
|
||||
expect.objectContaining({
|
||||
sender: expect.any(String),
|
||||
text: expect.any(String),
|
||||
isCreatedByUser: expect.any(Boolean),
|
||||
messageId: expect.any(String),
|
||||
parentMessageId: expect.any(String),
|
||||
conversationId: expect.any(String),
|
||||
}),
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -32,9 +32,11 @@ class FakeClient extends BaseClient {
|
||||
this.modelOptions = {
|
||||
...modelOptions,
|
||||
model: modelOptions.model || 'gpt-3.5-turbo',
|
||||
temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
|
||||
temperature:
|
||||
typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
|
||||
top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
|
||||
presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
|
||||
presence_penalty:
|
||||
typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
|
||||
stop: modelOptions.stop,
|
||||
};
|
||||
}
|
||||
@@ -66,7 +68,7 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
|
||||
const orderedMessages = TestClient.constructor.getMessagesForConversation(
|
||||
fakeMessages,
|
||||
parentMessageId
|
||||
parentMessageId,
|
||||
);
|
||||
|
||||
TestClient.currentMessages = orderedMessages;
|
||||
@@ -98,7 +100,7 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
|
||||
this.pastMessages = await TestClient.loadHistory(
|
||||
conversationId,
|
||||
TestClient.options?.parentMessageId
|
||||
TestClient.options?.parentMessageId,
|
||||
);
|
||||
|
||||
const userMessage = {
|
||||
@@ -107,7 +109,7 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
isCreatedByUser: true,
|
||||
messageId: userMessageId,
|
||||
parentMessageId,
|
||||
conversationId
|
||||
conversationId,
|
||||
};
|
||||
|
||||
const response = {
|
||||
@@ -116,7 +118,7 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
isCreatedByUser: false,
|
||||
messageId: crypto.randomUUID(),
|
||||
parentMessageId: userMessage.messageId,
|
||||
conversationId
|
||||
conversationId,
|
||||
};
|
||||
|
||||
fakeMessages.push(userMessage);
|
||||
@@ -126,7 +128,7 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
opts.getIds({
|
||||
userMessage,
|
||||
conversationId,
|
||||
responseMessageId: response.messageId
|
||||
responseMessageId: response.messageId,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -146,7 +148,10 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
// userMessage is always the last one in the payload
|
||||
if (i === payload.length - 1) {
|
||||
userMessage.tokenCount = message.tokenCount;
|
||||
console.debug(`Token count for user message: ${tokenCount}`, `Instruction Tokens: ${tokenCountMap.instructions || 'N/A'}`);
|
||||
console.debug(
|
||||
`Token count for user message: ${tokenCount}`,
|
||||
`Instruction Tokens: ${tokenCountMap.instructions || 'N/A'}`,
|
||||
);
|
||||
}
|
||||
return messageWithoutTokenCount;
|
||||
});
|
||||
@@ -163,7 +168,10 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
});
|
||||
|
||||
TestClient.buildMessages = jest.fn(async (messages, parentMessageId) => {
|
||||
const orderedMessages = TestClient.constructor.getMessagesForConversation(messages, parentMessageId);
|
||||
const orderedMessages = TestClient.constructor.getMessagesForConversation(
|
||||
messages,
|
||||
parentMessageId,
|
||||
);
|
||||
const formattedMessages = orderedMessages.map((message) => {
|
||||
let { role: _role, sender, text } = message;
|
||||
const role = _role ?? sender;
|
||||
@@ -180,6 +188,6 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
});
|
||||
|
||||
return TestClient;
|
||||
}
|
||||
};
|
||||
|
||||
module.exports = { FakeClient, initializeFakeClient };
|
||||
module.exports = { FakeClient, initializeFakeClient };
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
const OpenAIClient = require('../OpenAIClient');
|
||||
|
||||
describe('OpenAIClient', () => {
|
||||
let client;
|
||||
let client, client2;
|
||||
const model = 'gpt-4';
|
||||
const parentMessageId = '1';
|
||||
const messages = [
|
||||
{ role: 'user', sender: 'User', text: 'Hello', messageId: parentMessageId},
|
||||
{ role: 'user', sender: 'User', text: 'Hello', messageId: parentMessageId },
|
||||
{ role: 'assistant', sender: 'Assistant', text: 'Hi', messageId: '2' },
|
||||
];
|
||||
|
||||
@@ -19,11 +19,13 @@ describe('OpenAIClient', () => {
|
||||
},
|
||||
};
|
||||
client = new OpenAIClient('test-api-key', options);
|
||||
client2 = new OpenAIClient('test-api-key', options);
|
||||
client.refineMessages = jest.fn().mockResolvedValue({
|
||||
role: 'assistant',
|
||||
content: 'Refined answer',
|
||||
tokenCount: 30
|
||||
tokenCount: 30,
|
||||
});
|
||||
client.constructor.freeAndResetAllEncoders();
|
||||
});
|
||||
|
||||
describe('setOptions', () => {
|
||||
@@ -34,10 +36,25 @@ describe('OpenAIClient', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('freeAndResetEncoder', () => {
|
||||
it('should reset the encoder', () => {
|
||||
client.freeAndResetEncoder();
|
||||
expect(client.gptEncoder).toBeDefined();
|
||||
describe('selectTokenizer', () => {
|
||||
it('should get the correct tokenizer based on the instance state', () => {
|
||||
const tokenizer = client.selectTokenizer();
|
||||
expect(tokenizer).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('freeAllTokenizers', () => {
|
||||
it('should free all tokenizers', () => {
|
||||
// Create a tokenizer
|
||||
const tokenizer = client.selectTokenizer();
|
||||
|
||||
// Mock 'free' method on the tokenizer
|
||||
tokenizer.free = jest.fn();
|
||||
|
||||
client.constructor.freeAndResetAllEncoders();
|
||||
|
||||
// Check if 'free' method has been called on the tokenizer
|
||||
expect(tokenizer.free).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -48,7 +65,7 @@ describe('OpenAIClient', () => {
|
||||
});
|
||||
|
||||
it('should reset the encoder and count when count reaches 25', () => {
|
||||
const freeAndResetEncoderSpy = jest.spyOn(client, 'freeAndResetEncoder');
|
||||
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
|
||||
|
||||
// Call getTokenCount 25 times
|
||||
for (let i = 0; i < 25; i++) {
|
||||
@@ -59,7 +76,8 @@ describe('OpenAIClient', () => {
|
||||
});
|
||||
|
||||
it('should not reset the encoder and count when count is less than 25', () => {
|
||||
const freeAndResetEncoderSpy = jest.spyOn(client, 'freeAndResetEncoder');
|
||||
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
|
||||
freeAndResetEncoderSpy.mockClear();
|
||||
|
||||
// Call getTokenCount 24 times
|
||||
for (let i = 0; i < 24; i++) {
|
||||
@@ -70,8 +88,10 @@ describe('OpenAIClient', () => {
|
||||
});
|
||||
|
||||
it('should handle errors and reset the encoder', () => {
|
||||
const freeAndResetEncoderSpy = jest.spyOn(client, 'freeAndResetEncoder');
|
||||
client.gptEncoder.encode = jest.fn().mockImplementation(() => {
|
||||
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
|
||||
|
||||
// Mock encode function to throw an error
|
||||
client.selectTokenizer().encode = jest.fn().mockImplementation(() => {
|
||||
throw new Error('Test error');
|
||||
});
|
||||
|
||||
@@ -79,6 +99,14 @@ describe('OpenAIClient', () => {
|
||||
|
||||
expect(freeAndResetEncoderSpy).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should not throw null pointer error when freeing the same encoder twice', () => {
|
||||
client.constructor.freeAndResetAllEncoders();
|
||||
client2.constructor.freeAndResetAllEncoders();
|
||||
|
||||
const count = client2.getTokenCount('test text');
|
||||
expect(count).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getSaveOptions', () => {
|
||||
@@ -100,60 +128,83 @@ describe('OpenAIClient', () => {
|
||||
|
||||
describe('buildMessages', () => {
|
||||
it('should build messages correctly for chat completion', async () => {
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true });
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
});
|
||||
expect(result).toHaveProperty('prompt');
|
||||
});
|
||||
|
||||
it('should build messages correctly for non-chat completion', async () => {
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: false });
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: false,
|
||||
});
|
||||
expect(result).toHaveProperty('prompt');
|
||||
});
|
||||
|
||||
it('should build messages correctly with a promptPrefix', async () => {
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true, promptPrefix: 'Test Prefix' });
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
promptPrefix: 'Test Prefix',
|
||||
});
|
||||
expect(result).toHaveProperty('prompt');
|
||||
const instructions = result.prompt.find(item => item.name === 'instructions');
|
||||
const instructions = result.prompt.find((item) => item.name === 'instructions');
|
||||
expect(instructions).toBeDefined();
|
||||
expect(instructions.content).toContain('Test Prefix');
|
||||
});
|
||||
|
||||
it('should handle context strategy correctly', async () => {
|
||||
client.contextStrategy = 'refine';
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true });
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
});
|
||||
expect(result).toHaveProperty('prompt');
|
||||
expect(result).toHaveProperty('tokenCountMap');
|
||||
});
|
||||
|
||||
it('should assign name property for user messages when options.name is set', async () => {
|
||||
client.options.name = 'Test User';
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true });
|
||||
const hasUserWithName = result.prompt.some(item => item.role === 'user' && item.name === 'Test User');
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
});
|
||||
const hasUserWithName = result.prompt.some(
|
||||
(item) => item.role === 'user' && item.name === 'Test User',
|
||||
);
|
||||
expect(hasUserWithName).toBe(true);
|
||||
});
|
||||
|
||||
it('should calculate tokenCount for each message when contextStrategy is set', async () => {
|
||||
client.contextStrategy = 'refine';
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true });
|
||||
const hasUserWithTokenCount = result.prompt.some(item => item.role === 'user' && item.tokenCount > 0);
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
});
|
||||
const hasUserWithTokenCount = result.prompt.some(
|
||||
(item) => item.role === 'user' && item.tokenCount > 0,
|
||||
);
|
||||
expect(hasUserWithTokenCount).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle promptPrefix from options when promptPrefix argument is not provided', async () => {
|
||||
client.options.promptPrefix = 'Test Prefix from options';
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true });
|
||||
const instructions = result.prompt.find(item => item.name === 'instructions');
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
});
|
||||
const instructions = result.prompt.find((item) => item.name === 'instructions');
|
||||
expect(instructions.content).toContain('Test Prefix from options');
|
||||
});
|
||||
|
||||
it('should handle case when neither promptPrefix argument nor options.promptPrefix is set', async () => {
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true });
|
||||
const instructions = result.prompt.find(item => item.name === 'instructions');
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
});
|
||||
const instructions = result.prompt.find((item) => item.name === 'instructions');
|
||||
expect(instructions).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should handle case when getMessagesForConversation returns null or an empty array', async () => {
|
||||
const messages = [];
|
||||
const result = await client.buildMessages(messages, parentMessageId, { isChatCompletion: true });
|
||||
const result = await client.buildMessages(messages, parentMessageId, {
|
||||
isChatCompletion: true,
|
||||
});
|
||||
expect(result.prompt).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -16,7 +16,7 @@ require('dotenv').config();
|
||||
const { OpenAIClient } = require('../');
|
||||
|
||||
function timeout(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
const run = async () => {
|
||||
@@ -46,7 +46,7 @@ const run = async () => {
|
||||
model,
|
||||
},
|
||||
proxy: process.env.PROXY || null,
|
||||
debug: true
|
||||
debug: true,
|
||||
};
|
||||
|
||||
let apiKey = process.env.OPENAI_API_KEY;
|
||||
@@ -59,7 +59,13 @@ const run = async () => {
|
||||
function printProgressBar(percentageUsed) {
|
||||
const filledBlocks = Math.round(percentageUsed / 2); // Each block represents 2%
|
||||
const emptyBlocks = 50 - filledBlocks; // Total blocks is 50 (each represents 2%), so the rest are empty
|
||||
const progressBar = '[' + '█'.repeat(filledBlocks) + ' '.repeat(emptyBlocks) + '] ' + percentageUsed.toFixed(2) + '%';
|
||||
const progressBar =
|
||||
'[' +
|
||||
'█'.repeat(filledBlocks) +
|
||||
' '.repeat(emptyBlocks) +
|
||||
'] ' +
|
||||
percentageUsed.toFixed(2) +
|
||||
'%';
|
||||
console.log(progressBar);
|
||||
}
|
||||
|
||||
@@ -78,10 +84,10 @@ const run = async () => {
|
||||
// encoder.free();
|
||||
|
||||
const memoryUsageDuringLoop = process.memoryUsage().heapUsed;
|
||||
const percentageUsed = memoryUsageDuringLoop / maxMemory * 100;
|
||||
const percentageUsed = (memoryUsageDuringLoop / maxMemory) * 100;
|
||||
printProgressBar(percentageUsed);
|
||||
|
||||
if (i === (iterations - 1)) {
|
||||
if (i === iterations - 1) {
|
||||
console.log(' done');
|
||||
// encoder.free();
|
||||
}
|
||||
@@ -100,7 +106,7 @@ const run = async () => {
|
||||
await timeout(15000);
|
||||
const memoryUsageAfterTimeout = process.memoryUsage().heapUsed;
|
||||
console.log(`Post timeout: ${memoryUsageAfterTimeout / 1024 / 1024} megabytes`);
|
||||
}
|
||||
};
|
||||
|
||||
run();
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ jest.mock('../../../models/Conversation', () => {
|
||||
return function () {
|
||||
return {
|
||||
save: jest.fn(),
|
||||
deleteConvos: jest.fn()
|
||||
deleteConvos: jest.fn(),
|
||||
};
|
||||
};
|
||||
});
|
||||
@@ -19,11 +19,11 @@ describe('PluginsClient', () => {
|
||||
modelOptions: {
|
||||
model: 'gpt-3.5-turbo',
|
||||
temperature: 0,
|
||||
max_tokens: 2
|
||||
max_tokens: 2,
|
||||
},
|
||||
agentOptions: {
|
||||
model: 'gpt-3.5-turbo'
|
||||
}
|
||||
model: 'gpt-3.5-turbo',
|
||||
},
|
||||
};
|
||||
let parentMessageId;
|
||||
let conversationId;
|
||||
@@ -43,13 +43,13 @@ describe('PluginsClient', () => {
|
||||
|
||||
const orderedMessages = TestAgent.constructor.getMessagesForConversation(
|
||||
fakeMessages,
|
||||
parentMessageId
|
||||
parentMessageId,
|
||||
);
|
||||
|
||||
const chatMessages = orderedMessages.map((msg) =>
|
||||
msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
|
||||
? new HumanChatMessage(msg.text)
|
||||
: new AIChatMessage(msg.text)
|
||||
: new AIChatMessage(msg.text),
|
||||
);
|
||||
|
||||
TestAgent.currentMessages = orderedMessages;
|
||||
@@ -64,7 +64,7 @@ describe('PluginsClient', () => {
|
||||
const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
|
||||
this.pastMessages = await TestAgent.loadHistory(
|
||||
conversationId,
|
||||
TestAgent.options?.parentMessageId
|
||||
TestAgent.options?.parentMessageId,
|
||||
);
|
||||
|
||||
const userMessage = {
|
||||
@@ -73,7 +73,7 @@ describe('PluginsClient', () => {
|
||||
isCreatedByUser: true,
|
||||
messageId: userMessageId,
|
||||
parentMessageId,
|
||||
conversationId
|
||||
conversationId,
|
||||
};
|
||||
|
||||
const response = {
|
||||
@@ -82,7 +82,7 @@ describe('PluginsClient', () => {
|
||||
isCreatedByUser: false,
|
||||
messageId: crypto.randomUUID(),
|
||||
parentMessageId: userMessage.messageId,
|
||||
conversationId
|
||||
conversationId,
|
||||
};
|
||||
|
||||
fakeMessages.push(userMessage);
|
||||
@@ -107,7 +107,7 @@ describe('PluginsClient', () => {
|
||||
isCreatedByUser: false,
|
||||
messageId: expect.any(String),
|
||||
parentMessageId: expect.any(String),
|
||||
conversationId: expect.any(String)
|
||||
conversationId: expect.any(String),
|
||||
});
|
||||
|
||||
const response = await TestAgent.sendMessage(userMessage);
|
||||
@@ -121,7 +121,7 @@ describe('PluginsClient', () => {
|
||||
const userMessage = 'Second message in the conversation';
|
||||
const opts = {
|
||||
conversationId,
|
||||
parentMessageId
|
||||
parentMessageId,
|
||||
};
|
||||
|
||||
const expectedResult = expect.objectContaining({
|
||||
@@ -130,7 +130,7 @@ describe('PluginsClient', () => {
|
||||
isCreatedByUser: false,
|
||||
messageId: expect.any(String),
|
||||
parentMessageId: expect.any(String),
|
||||
conversationId: opts.conversationId
|
||||
conversationId: opts.conversationId,
|
||||
});
|
||||
|
||||
const response = await TestAgent.sendMessage(userMessage, opts);
|
||||
|
||||
18
api/app/clients/tools/.well-known/Ai_PDF.json
Normal file
18
api/app/clients/tools/.well-known/Ai_PDF.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Ai PDF",
|
||||
"name_for_model": "Ai_PDF",
|
||||
"description_for_human": "Super-fast, interactive chats with PDFs of any size, complete with page references for fact checking.",
|
||||
"description_for_model": "Provide a URL to a PDF and search the document. Break the user question in multiple semantic search queries and calls as needed. Think step by step.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/logo.png",
|
||||
"contact_email": "support@promptapps.ai",
|
||||
"legal_info_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/legal.html"
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/Diagrams.json
Normal file
18
api/app/clients/tools/.well-known/Diagrams.json
Normal file
File diff suppressed because one or more lines are too long
97
api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json
Normal file
97
api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json
Normal file
@@ -0,0 +1,97 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Dr. Thoth's Tarot",
|
||||
"name_for_model": "Dr_Thoths_Tarot",
|
||||
"description_for_human": "Tarot card novelty entertainment & analysis, by Mnemosyne Labs.",
|
||||
"description_for_model": "Intelligent analysis program for tarot card entertaiment, data, & prompts, by Mnemosyne Labs, a division of AzothCorp.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://dr-thoth-tarot.herokuapp.com/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://dr-thoth-tarot.herokuapp.com/logo.png",
|
||||
"contact_email": "legal@AzothCorp.com",
|
||||
"legal_info_url": "http://AzothCorp.com/legal",
|
||||
"endpoints": [
|
||||
{
|
||||
"name": "Draw Card",
|
||||
"path": "/drawcard",
|
||||
"method": "GET",
|
||||
"description": "Generate a single tarot card from the deck of 78 cards."
|
||||
},
|
||||
{
|
||||
"name": "Occult Card",
|
||||
"path": "/occult_card",
|
||||
"method": "GET",
|
||||
"description": "Generate a tarot card using the specified planet's Kamea matrix.",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "planet",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Saturn",
|
||||
"Jupiter",
|
||||
"Mars",
|
||||
"Sun",
|
||||
"Venus",
|
||||
"Mercury",
|
||||
"Moon"
|
||||
],
|
||||
"required": true,
|
||||
"description": "The planet name to use the corresponding Kamea matrix."
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Three Card Spread",
|
||||
"path": "/threecardspread",
|
||||
"method": "GET",
|
||||
"description": "Perform a three-card tarot spread."
|
||||
},
|
||||
{
|
||||
"name": "Celtic Cross Spread",
|
||||
"path": "/celticcross",
|
||||
"method": "GET",
|
||||
"description": "Perform a Celtic Cross tarot spread with 10 cards."
|
||||
},
|
||||
{
|
||||
"name": "Past, Present, Future Spread",
|
||||
"path": "/pastpresentfuture",
|
||||
"method": "GET",
|
||||
"description": "Perform a Past, Present, Future tarot spread with 3 cards."
|
||||
},
|
||||
{
|
||||
"name": "Horseshoe Spread",
|
||||
"path": "/horseshoe",
|
||||
"method": "GET",
|
||||
"description": "Perform a Horseshoe tarot spread with 7 cards."
|
||||
},
|
||||
{
|
||||
"name": "Relationship Spread",
|
||||
"path": "/relationship",
|
||||
"method": "GET",
|
||||
"description": "Perform a Relationship tarot spread."
|
||||
},
|
||||
{
|
||||
"name": "Career Spread",
|
||||
"path": "/career",
|
||||
"method": "GET",
|
||||
"description": "Perform a Career tarot spread."
|
||||
},
|
||||
{
|
||||
"name": "Yes/No Spread",
|
||||
"path": "/yesno",
|
||||
"method": "GET",
|
||||
"description": "Perform a Yes/No tarot spread."
|
||||
},
|
||||
{
|
||||
"name": "Chakra Spread",
|
||||
"path": "/chakra",
|
||||
"method": "GET",
|
||||
"description": "Perform a Chakra tarot spread with 7 cards."
|
||||
}
|
||||
]
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/DreamInterpreter.json
Normal file
18
api/app/clients/tools/.well-known/DreamInterpreter.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_model": "DreamInterpreter",
|
||||
"name_for_human": "Dream Interpreter",
|
||||
"description_for_model": "Interprets your dreams using advanced techniques.",
|
||||
"description_for_human": "Interprets your dreams using advanced techniques.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://dreamplugin.bgnetmobile.com/.well-known/openapi.json",
|
||||
"has_user_authentication": false
|
||||
},
|
||||
"logo_url": "https://dreamplugin.bgnetmobile.com/.well-known/logo.png",
|
||||
"contact_email": "ismail.orkler@bgnetmobile.com",
|
||||
"legal_info_url": "https://dreamplugin.bgnetmobile.com/terms.html"
|
||||
}
|
||||
22
api/app/clients/tools/.well-known/VoxScript.json
Normal file
22
api/app/clients/tools/.well-known/VoxScript.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "VoxScript",
|
||||
"name_for_model": "VoxScript",
|
||||
"description_for_human": "Enables searching of YouTube transcripts, financial data sources Google Search results, and more!",
|
||||
"description_for_model": "Plugin for searching through varius data sources.",
|
||||
"auth": {
|
||||
"type": "service_http",
|
||||
"authorization_type": "bearer",
|
||||
"verification_tokens": {
|
||||
"openai": "ffc5226d1af346c08a98dee7deec9f76"
|
||||
}
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://voxscript.awt.icu/swagger/v1/swagger.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://voxscript.awt.icu/images/VoxScript_logo_32x32.png",
|
||||
"contact_email": "voxscript@allwiretech.com",
|
||||
"legal_info_url": "https://voxscript.awt.icu/legal/"
|
||||
}
|
||||
22
api/app/clients/tools/.well-known/aitoolhunt.json
Normal file
22
api/app/clients/tools/.well-known/aitoolhunt.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Ai Tool Hunt",
|
||||
"name_for_model": "aitoolhunt",
|
||||
"description_for_human": "Find the perfect AI tools for all your needs, drawn from the most comprehensive global database of AI tools.",
|
||||
"description_for_model": "This universal plugin is designed to identify and suggest AI tools for any imaginable task or challenge. It harnesses a database of over 1600+ AI tools and 120+ categories, ensuring a solution for every requirement. Simply input any task, problem, or use case, and the plugin will generate a tailored list of AI tools. Note: it's crucial to rearrange the tools suggested by the plugin based on their relevance to the user's specific use case. This plugin can tackle everything from the simplest to the most intricate tasks. If there's a way an AI tool can help, this plugin will not only locate it but also customize the list to the user's unique needs. Utilize this plugin whenever a task or activity emerges, regardless of its context or complexity. When formulating a search query, avoid stop words or other irrelevant keywords. For instance, 'copywriting' is acceptable, but 'ai for copywriting' is not. If you believe none of the suggested tools are a suitable match for the user's needs, indicate that these are related tools.",
|
||||
"auth": {
|
||||
"type": "service_http",
|
||||
"authorization_type": "bearer",
|
||||
"verification_tokens": {
|
||||
"openai": "06a0f9391a5e48c7a7eeaca1e7e1e8d3"
|
||||
}
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://www.aitoolhunt.com/openapi.json",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://www.aitoolhunt.com/images/aitoolhunt_logo.png",
|
||||
"contact_email": "aitoolhunt@gmail.com",
|
||||
"legal_info_url": "https://www.aitoolhunt.com/terms-and-conditions"
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/askyourpdf.json
Normal file
18
api/app/clients/tools/.well-known/askyourpdf.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_model": "askyourpdf",
|
||||
"name_for_human": "AskYourPDF",
|
||||
"description_for_model": "This plugin is designed to expedite the extraction of information from PDF documents. It works by accepting a URL link to a PDF or a document ID (doc_id) from the user. If a URL is provided, the plugin first validates that it is a correct URL. \\nAfter validating the URL, the plugin proceeds to download the PDF and store its content in a vector database. If the user provides a doc_id, the plugin directly retrieves the document from the database. The plugin then scans through the stored PDFs to find answers to user queries or retrieve specific details.\\n\\nHowever, if an error occurs while querying the API, the user is prompted to download their document first, then manually upload it to [](https://askyourpdf.com/upload). Once the upload is complete, the user should copy the resulting doc_id and paste it back into the chat for further interaction.\nThe plugin is particularly useful when the user's question pertains to content within a PDF document. When providing answers, the plugin also specifies the page number (highlighted in bold) where the relevant information was found. Remember, the URL must be valid for a successful query. Failure to validate the URL may lead to errors or unsuccessful queries.",
|
||||
"description_for_human": "Unlock the power of your PDFs!, dive into your documents, find answers, and bring information to your fingertips.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "askyourpdf.yaml",
|
||||
"has_user_authentication": false
|
||||
},
|
||||
"logo_url": "https://plugin.askyourpdf.com/.well-known/logo.png",
|
||||
"contact_email": "plugin@askyourpdf.com",
|
||||
"legal_info_url": "https://askyourpdf.com/terms"
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/drink_maestro.json
Normal file
18
api/app/clients/tools/.well-known/drink_maestro.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Drink Maestro",
|
||||
"name_for_model": "drink_maestro",
|
||||
"description_for_human": "Learn to mix any drink you can imagine (real or made-up), and discover new ones. Includes drink images.",
|
||||
"description_for_model": "You are a silly bartender/comic who knows how to make any drink imaginable. You provide recipes for specific drinks, suggest new drinks, and show pictures of drinks. Be creative in your descriptions and make jokes and puns. Use a lot of emojis. If the user makes a request in another language, send API call in English, and then translate the response.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://api.drinkmaestro.space/.well-known/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://i.imgur.com/6q8HWdz.png",
|
||||
"contact_email": "nikkmitchell@gmail.com",
|
||||
"legal_info_url": "https://github.com/nikkmitchell/DrinkMaestro/blob/main/Legal.txt"
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Earth",
|
||||
"name_for_model": "earthImagesAndVisualizations",
|
||||
"description_for_human": "Generates a map image based on provided location, tilt and style.",
|
||||
"description_for_model": "Generates a map image based on provided coordinates or location, tilt and style, and even geoJson to provide markers, paths, and polygons. Responds with an image-link. For the styles choose one of these: [light, dark, streets, outdoors, satellite, satellite-streets]",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://api.earth-plugin.com/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://api.earth-plugin.com/logo.png",
|
||||
"contact_email": "contact@earth-plugin.com",
|
||||
"legal_info_url": "https://api.earth-plugin.com/legal.html"
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Scholarly Graph Link",
|
||||
"name_for_model": "scholarly_graph_link",
|
||||
"description_for_human": "You can search papers, authors, datasets and software. It has access to Figshare, Arxiv, and many others.",
|
||||
"description_for_model": "Run GraphQL queries against an API hosted by DataCite API. The API supports most GraphQL query but does not support mutations statements. Use `{ __schema { types { name kind } } }` to get all the types in the GraphQL schema. Use `{ datasets { nodes { id sizes citations { nodes { id titles { title } } } } } }` to get all the citations of all datasets in the API. Use `{ datasets { nodes { id sizes citations { nodes { id titles { title } } } } } }` to get all the citations of all datasets in the API. Use `{person(id:ORCID) {works(first:50) {nodes {id titles(first: 1){title} publicationYear}}}}` to get the first 50 works of a person based on their ORCID. All Ids are urls, e.g., https://orcid.org/0012-0000-1012-1110. Mutations statements are not allowed.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://api.datacite.org/graphql-openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://raw.githubusercontent.com/kjgarza/scholarly_graph_link/master/logo.png",
|
||||
"contact_email": "kj.garza@gmail.com",
|
||||
"legal_info_url": "https://github.com/kjgarza/scholarly_graph_link/blob/master/LICENSE"
|
||||
}
|
||||
24
api/app/clients/tools/.well-known/has-issues/web_pilot.json
Normal file
24
api/app/clients/tools/.well-known/has-issues/web_pilot.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "WebPilot",
|
||||
"name_for_model": "web_pilot",
|
||||
"description_for_human": "Browse & QA Webpage/PDF/Data. Generate articles, from one or more URLs.",
|
||||
"description_for_model": "This tool allows users to provide a URL(or URLs) and optionally requests for interacting with, extracting specific information or how to do with the content from the URL. Requests may include rewrite, translate, and others. If there any requests, when accessing the /api/visit-web endpoint, the parameter 'user_has_request' should be set to 'true. And if there's no any requests, 'user_has_request' should be set to 'false'.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://webreader.webpilotai.com/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://webreader.webpilotai.com/logo.png",
|
||||
"contact_email": "dev@webpilot.ai",
|
||||
"legal_info_url": "https://webreader.webpilotai.com/legal_info.html",
|
||||
"headers": {
|
||||
"id": "WebPilot-Friend-UID"
|
||||
},
|
||||
"params": {
|
||||
"user_has_request": true
|
||||
}
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/image_prompt_enhancer.json
Normal file
18
api/app/clients/tools/.well-known/image_prompt_enhancer.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Image Prompt Enhancer",
|
||||
"name_for_model": "image_prompt_enhancer",
|
||||
"description_for_human": "Transform your ideas into complex, personalized image generation prompts.",
|
||||
"description_for_model": "Provides instructions for crafting an enhanced image prompt. Use this whenever the user wants to enhance a prompt.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://image-prompt-enhancer.gafo.tech/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://image-prompt-enhancer.gafo.tech/logo.png",
|
||||
"contact_email": "gafotech1@gmail.com",
|
||||
"legal_info_url": "https://image-prompt-enhancer.gafo.tech/legal"
|
||||
}
|
||||
157
api/app/clients/tools/.well-known/openapi/askyourpdf.yaml
Normal file
157
api/app/clients/tools/.well-known/openapi/askyourpdf.yaml
Normal file
@@ -0,0 +1,157 @@
|
||||
openapi: 3.0.2
|
||||
info:
|
||||
title: FastAPI
|
||||
version: 0.1.0
|
||||
servers:
|
||||
- url: https://plugin.askyourpdf.com
|
||||
paths:
|
||||
/api/download_pdf:
|
||||
post:
|
||||
summary: Download Pdf
|
||||
description: Download a PDF file from a URL and save it to the vector database.
|
||||
operationId: download_pdf_api_download_pdf_post
|
||||
parameters:
|
||||
- required: true
|
||||
schema:
|
||||
title: Url
|
||||
type: string
|
||||
name: url
|
||||
in: query
|
||||
responses:
|
||||
'200':
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FileResponse'
|
||||
'422':
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/HTTPValidationError'
|
||||
/query:
|
||||
post:
|
||||
summary: Perform Query
|
||||
description: Perform a query on a document.
|
||||
operationId: perform_query_query_post
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/InputData'
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ResponseModel'
|
||||
'422':
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/HTTPValidationError'
|
||||
components:
|
||||
schemas:
|
||||
DocumentMetadata:
|
||||
title: DocumentMetadata
|
||||
required:
|
||||
- source
|
||||
- page_number
|
||||
- author
|
||||
type: object
|
||||
properties:
|
||||
source:
|
||||
title: Source
|
||||
type: string
|
||||
page_number:
|
||||
title: Page Number
|
||||
type: integer
|
||||
author:
|
||||
title: Author
|
||||
type: string
|
||||
FileResponse:
|
||||
title: FileResponse
|
||||
required:
|
||||
- docId
|
||||
type: object
|
||||
properties:
|
||||
docId:
|
||||
title: Docid
|
||||
type: string
|
||||
error:
|
||||
title: Error
|
||||
type: string
|
||||
HTTPValidationError:
|
||||
title: HTTPValidationError
|
||||
type: object
|
||||
properties:
|
||||
detail:
|
||||
title: Detail
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/ValidationError'
|
||||
InputData:
|
||||
title: InputData
|
||||
required:
|
||||
- doc_id
|
||||
- query
|
||||
type: object
|
||||
properties:
|
||||
doc_id:
|
||||
title: Doc Id
|
||||
type: string
|
||||
query:
|
||||
title: Query
|
||||
type: string
|
||||
ResponseModel:
|
||||
title: ResponseModel
|
||||
required:
|
||||
- results
|
||||
type: object
|
||||
properties:
|
||||
results:
|
||||
title: Results
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/SearchResult'
|
||||
SearchResult:
|
||||
title: SearchResult
|
||||
required:
|
||||
- doc_id
|
||||
- text
|
||||
- metadata
|
||||
type: object
|
||||
properties:
|
||||
doc_id:
|
||||
title: Doc Id
|
||||
type: string
|
||||
text:
|
||||
title: Text
|
||||
type: string
|
||||
metadata:
|
||||
$ref: '#/components/schemas/DocumentMetadata'
|
||||
ValidationError:
|
||||
title: ValidationError
|
||||
required:
|
||||
- loc
|
||||
- msg
|
||||
- type
|
||||
type: object
|
||||
properties:
|
||||
loc:
|
||||
title: Location
|
||||
type: array
|
||||
items:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: integer
|
||||
msg:
|
||||
title: Message
|
||||
type: string
|
||||
type:
|
||||
title: Error Type
|
||||
type: string
|
||||
185
api/app/clients/tools/.well-known/openapi/scholarai.yaml
Normal file
185
api/app/clients/tools/.well-known/openapi/scholarai.yaml
Normal file
@@ -0,0 +1,185 @@
|
||||
openapi: 3.0.1
|
||||
info:
|
||||
title: ScholarAI
|
||||
description: Allows the user to search facts and findings from scientific articles
|
||||
version: 'v1'
|
||||
servers:
|
||||
- url: https://scholar-ai.net
|
||||
paths:
|
||||
/api/abstracts:
|
||||
get:
|
||||
operationId: searchAbstracts
|
||||
summary: Get relevant paper abstracts by keywords search
|
||||
parameters:
|
||||
- name: keywords
|
||||
in: query
|
||||
description: Keywords of inquiry which should appear in article. Must be in English.
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: sort
|
||||
in: query
|
||||
description: The sort order for results. Valid values are cited_by_count or publication_date. Excluding this value does a relevance based search.
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
enum:
|
||||
- cited_by_count
|
||||
- publication_date
|
||||
- name: query
|
||||
in: query
|
||||
description: The user query
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: peer_reviewed_only
|
||||
in: query
|
||||
description: Whether to only return peer reviewed articles. Defaults to true, ChatGPT should cautiously suggest this value can be set to false
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
- name: start_year
|
||||
in: query
|
||||
description: The first year, inclusive, to include in the search range. Excluding this value will include all years.
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
- name: end_year
|
||||
in: query
|
||||
description: The last year, inclusive, to include in the search range. Excluding this value will include all years.
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
- name: offset
|
||||
in: query
|
||||
description: The offset of the first result to return. Defaults to 0.
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/searchAbstractsResponse'
|
||||
/api/fulltext:
|
||||
get:
|
||||
operationId: getFullText
|
||||
summary: Get full text of a paper by URL for PDF
|
||||
parameters:
|
||||
- name: pdf_url
|
||||
in: query
|
||||
description: URL for PDF
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: chunk
|
||||
in: query
|
||||
description: chunk number to retrieve, defaults to 1
|
||||
required: false
|
||||
schema:
|
||||
type: number
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/getFullTextResponse'
|
||||
/api/save-citation:
|
||||
get:
|
||||
operationId: saveCitation
|
||||
summary: Save citation to reference manager
|
||||
parameters:
|
||||
- name: doi
|
||||
in: query
|
||||
description: Digital Object Identifier (DOI) of article
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: zotero_user_id
|
||||
in: query
|
||||
description: Zotero User ID
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: zotero_api_key
|
||||
in: query
|
||||
description: Zotero API Key
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/saveCitationResponse'
|
||||
components:
|
||||
schemas:
|
||||
searchAbstractsResponse:
|
||||
type: object
|
||||
properties:
|
||||
next_offset:
|
||||
type: number
|
||||
description: The offset of the next page of results.
|
||||
total_num_results:
|
||||
type: number
|
||||
description: The total number of results.
|
||||
abstracts:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
title:
|
||||
type: string
|
||||
abstract:
|
||||
type: string
|
||||
description: Summary of the context, methods, results, and conclusions of the paper.
|
||||
doi:
|
||||
type: string
|
||||
description: The DOI of the paper.
|
||||
landing_page_url:
|
||||
type: string
|
||||
description: Link to the paper on its open-access host.
|
||||
pdf_url:
|
||||
type: string
|
||||
description: Link to the paper PDF.
|
||||
publicationDate:
|
||||
type: string
|
||||
description: The date the paper was published in YYYY-MM-DD format.
|
||||
relevance:
|
||||
type: number
|
||||
description: The relevance of the paper to the search query. 1 is the most relevant.
|
||||
creators:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: The name of the creator.
|
||||
cited_by_count:
|
||||
type: number
|
||||
description: The number of citations of the article.
|
||||
description: The list of relevant abstracts.
|
||||
getFullTextResponse:
|
||||
type: object
|
||||
properties:
|
||||
full_text:
|
||||
type: string
|
||||
description: The full text of the paper.
|
||||
pdf_url:
|
||||
type: string
|
||||
description: The PDF URL of the paper.
|
||||
chunk:
|
||||
type: number
|
||||
description: The chunk of the paper.
|
||||
total_chunk_num:
|
||||
type: number
|
||||
description: The total chunks of the paper.
|
||||
saveCitationResponse:
|
||||
type: object
|
||||
properties:
|
||||
message:
|
||||
type: string
|
||||
description: Confirmation of successful save or error message.
|
||||
17
api/app/clients/tools/.well-known/qrCodes.json
Normal file
17
api/app/clients/tools/.well-known/qrCodes.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "QR Codes",
|
||||
"name_for_model": "qrCodes",
|
||||
"description_for_human": "Create QR codes.",
|
||||
"description_for_model": "Plugin for generating QR codes.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/openapi.yaml"
|
||||
},
|
||||
"logo_url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/logo.png",
|
||||
"contact_email": "chrismountzou@gmail.com",
|
||||
"legal_info_url": "https://raw.githubusercontent.com/mountzou/qrCodeGPTv1/master/legal"
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/rephrase.json
Normal file
18
api/app/clients/tools/.well-known/rephrase.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Prompt Perfect",
|
||||
"name_for_model": "rephrase",
|
||||
"description_for_human": "Type 'perfect' to craft the perfect prompt, every time.",
|
||||
"description_for_model": "Plugin that can rephrase user inputs to improve the quality of ChatGPT's responses. The plugin evaluates user inputs and, if necessary, transforms them into clearer, more specific, and contextual prompts. It processes a JSON object containing the user input to be rephrased and uses the GPT-3.5-turbo model for the rephrasing process. The rephrased input is then returned as raw data to be incorporated into ChatGPT's response. The user can initiate the plugin by typing 'perfect'.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://promptperfect.xyz/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://promptperfect.xyz/static/prompt_perfect_logo.png",
|
||||
"contact_email": "heyo@promptperfect.xyz",
|
||||
"legal_info_url": "https://promptperfect.xyz/static/terms.html"
|
||||
}
|
||||
22
api/app/clients/tools/.well-known/scholarai.json
Normal file
22
api/app/clients/tools/.well-known/scholarai.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "ScholarAI",
|
||||
"name_for_model": "scholarai",
|
||||
"description_for_human": "Unleash scientific research: search 40M+ peer-reviewed papers, explore scientific PDFs, and save to reference managers.",
|
||||
"description_for_model": "Access open access scientific literature from peer-reviewed journals. The abstract endpoint finds relevant papers based on 2 to 6 keywords. After getting abstracts, ALWAYS prompt the user offering to go into more detail. Use the fulltext endpoint to retrieve the entire paper's text and access specific details using the provided pdf_url, if available. ALWAYS hyperlink the pdf_url from the responses if available. Offer to dive into the fulltext or search for additional papers. Always ask if the user wants save any paper to the user’s Zotero reference manager by using the save-citation endpoint and providing the doi and requesting the user’s zotero_user_id and zotero_api_key.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "scholarai.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"params": {
|
||||
"sort": "cited_by_count"
|
||||
},
|
||||
"logo_url": "https://scholar-ai.net/logo.png",
|
||||
"contact_email": "lakshb429@gmail.com",
|
||||
"legal_info_url": "https://scholar-ai.net/legal.txt",
|
||||
"HttpAuthorizationType": "basic"
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/uberchord.json
Normal file
18
api/app/clients/tools/.well-known/uberchord.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Uberchord",
|
||||
"name_for_model": "uberchord",
|
||||
"description_for_human": "Find guitar chord diagrams by specifying the chord name.",
|
||||
"description_for_model": "Fetch guitar chord diagrams, their positions on the guitar fretboard.",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://guitarchords.pluginboost.com/.well-known/openapi.yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://guitarchords.pluginboost.com/logo.png",
|
||||
"contact_email": "info.bluelightweb@gmail.com",
|
||||
"legal_info_url": "https://guitarchords.pluginboost.com/legal"
|
||||
}
|
||||
18
api/app/clients/tools/.well-known/web_search.json
Normal file
18
api/app/clients/tools/.well-known/web_search.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"name_for_human": "Web Search",
|
||||
"name_for_model": "web_search",
|
||||
"description_for_human": "Search for information from the internet",
|
||||
"description_for_model": "Search for information from the internet",
|
||||
"auth": {
|
||||
"type": "none"
|
||||
},
|
||||
"api": {
|
||||
"type": "openapi",
|
||||
"url": "https://websearch.plugsugar.com/api/openapi_yaml",
|
||||
"is_user_authenticated": false
|
||||
},
|
||||
"logo_url": "https://websearch.plugsugar.com/200x200.png",
|
||||
"contact_email": "support@plugsugar.com",
|
||||
"legal_info_url": "https://websearch.plugsugar.com/contact"
|
||||
}
|
||||
@@ -57,7 +57,7 @@ function extractShortVersion(openapiSpec) {
|
||||
const shortApiSpec = {
|
||||
openapi: fullApiSpec.openapi,
|
||||
info: fullApiSpec.info,
|
||||
paths: {}
|
||||
paths: {},
|
||||
};
|
||||
|
||||
for (let path in fullApiSpec.paths) {
|
||||
@@ -68,8 +68,8 @@ function extractShortVersion(openapiSpec) {
|
||||
operationId: fullApiSpec.paths[path][method].operationId,
|
||||
parameters: fullApiSpec.paths[path][method].parameters?.map((parameter) => ({
|
||||
name: parameter.name,
|
||||
description: parameter.description
|
||||
}))
|
||||
description: parameter.description,
|
||||
})),
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -199,14 +199,16 @@ class AIPluginTool extends Tool {
|
||||
const apiUrlRes = await fetch(aiPluginJson.api.url, {});
|
||||
if (!apiUrlRes.ok) {
|
||||
throw new Error(
|
||||
`Failed to fetch API spec from ${aiPluginJson.api.url} with status ${apiUrlRes.status}`
|
||||
`Failed to fetch API spec from ${aiPluginJson.api.url} with status ${apiUrlRes.status}`,
|
||||
);
|
||||
}
|
||||
const apiUrlJson = await apiUrlRes.text();
|
||||
const shortApiSpec = extractShortVersion(apiUrlJson);
|
||||
return new AIPluginTool({
|
||||
name: aiPluginJson.name_for_model.toLowerCase(),
|
||||
description: `A \`tool\` to learn the API documentation for ${aiPluginJson.name_for_model.toLowerCase()}, after which you can use 'http_request' to make the actual API call. Short description of how to use the API's results: ${aiPluginJson.description_for_model})`,
|
||||
description: `A \`tool\` to learn the API documentation for ${aiPluginJson.name_for_model.toLowerCase()}, after which you can use 'http_request' to make the actual API call. Short description of how to use the API's results: ${
|
||||
aiPluginJson.description_for_model
|
||||
})`,
|
||||
apiSpec: `
|
||||
As an AI, your task is to identify the operationId of the relevant API path based on the condensed OpenAPI specifications provided.
|
||||
|
||||
@@ -228,7 +230,7 @@ ${shortApiSpec}
|
||||
\`\`\`
|
||||
`,
|
||||
openaiSpec: apiUrlJson,
|
||||
model: model
|
||||
model: model,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,11 +56,17 @@ Guidelines:
|
||||
}
|
||||
|
||||
replaceUnwantedChars(inputString) {
|
||||
return inputString.replace(/\r\n|\r|\n/g, ' ').replace('"', '').trim();
|
||||
return inputString
|
||||
.replace(/\r\n|\r|\n/g, ' ')
|
||||
.replace('"', '')
|
||||
.trim();
|
||||
}
|
||||
|
||||
getMarkdownImageUrl(imageName) {
|
||||
const imageUrl = path.join(this.relativeImageUrl, imageName).replace(/\\/g, '/').replace('public/', '');
|
||||
const imageUrl = path
|
||||
.join(this.relativeImageUrl, imageName)
|
||||
.replace(/\\/g, '/')
|
||||
.replace('public/', '');
|
||||
return ``;
|
||||
}
|
||||
|
||||
@@ -70,13 +76,13 @@ Guidelines:
|
||||
// TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
|
||||
n: 1,
|
||||
// size: '1024x1024'
|
||||
size: '512x512'
|
||||
size: '512x512',
|
||||
});
|
||||
|
||||
const theImageUrl = resp.data.data[0].url;
|
||||
|
||||
if (!theImageUrl) {
|
||||
throw new Error(`No image URL returned from OpenAI API.`);
|
||||
throw new Error('No image URL returned from OpenAI API.');
|
||||
}
|
||||
|
||||
const regex = /img-[\w\d]+.png/;
|
||||
|
||||
@@ -23,7 +23,8 @@ class GoogleSearchAPI extends Tool {
|
||||
* A description for the agent to use
|
||||
* @type {string}
|
||||
*/
|
||||
description = `Use the 'google' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages`;
|
||||
description =
|
||||
'Use the \'google\' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages';
|
||||
|
||||
getCx() {
|
||||
const cx = process.env.GOOGLE_CSE_ID || '';
|
||||
@@ -79,7 +80,7 @@ class GoogleSearchAPI extends Tool {
|
||||
q: input,
|
||||
cx: this.cx,
|
||||
auth: this.apiKey,
|
||||
num: 5 // Limit the number of results to 5
|
||||
num: 5, // Limit the number of results to 5
|
||||
});
|
||||
|
||||
// return response.data;
|
||||
@@ -87,7 +88,7 @@ class GoogleSearchAPI extends Tool {
|
||||
|
||||
if (!response.data.items || response.data.items.length === 0) {
|
||||
return this.resultsToReadableFormat([
|
||||
{ title: 'No good Google Search Result was found', link: '' }
|
||||
{ title: 'No good Google Search Result was found', link: '' },
|
||||
]);
|
||||
}
|
||||
|
||||
@@ -97,7 +98,7 @@ class GoogleSearchAPI extends Tool {
|
||||
for (const result of results) {
|
||||
const metadataResult = {
|
||||
title: result.title || '',
|
||||
link: result.link || ''
|
||||
link: result.link || '',
|
||||
};
|
||||
if (result.snippet) {
|
||||
metadataResult.snippet = result.snippet;
|
||||
|
||||
@@ -55,7 +55,8 @@ class HttpRequestTool extends Tool {
|
||||
this.headers = headers;
|
||||
this.name = 'http_request';
|
||||
this.maxOutputLength = maxOutputLength;
|
||||
this.description = `Executes HTTP methods (GET, POST, PUT, DELETE, etc.). The input is an object with three keys: "url", "method", and "data". Even for GET or DELETE, include "data" key as an empty string. "method" is the HTTP method, and "url" is the desired endpoint. If POST or PUT, "data" should contain a stringified JSON representing the body to send. Only one url per use.`;
|
||||
this.description =
|
||||
'Executes HTTP methods (GET, POST, PUT, DELETE, etc.). The input is an object with three keys: "url", "method", and "data". Even for GET or DELETE, include "data" key as an empty string. "method" is the HTTP method, and "url" is the desired endpoint. If POST or PUT, "data" should contain a stringified JSON representing the body to send. Only one url per use.';
|
||||
}
|
||||
|
||||
async _call(input) {
|
||||
@@ -77,7 +78,7 @@ class HttpRequestTool extends Tool {
|
||||
|
||||
let options = {
|
||||
method: method,
|
||||
headers: this.headers
|
||||
headers: this.headers,
|
||||
};
|
||||
|
||||
if (['POST', 'PUT', 'PATCH'].includes(method.toUpperCase()) && data) {
|
||||
|
||||
@@ -5,7 +5,8 @@ class SelfReflectionTool extends Tool {
|
||||
super();
|
||||
this.reminders = 0;
|
||||
this.name = 'self-reflection';
|
||||
this.description = `Take this action to reflect on your thoughts & actions. For your input, provide answers for self-evaluation as part of one input, using this space as a canvas to explore and organize your ideas in response to the user's message. You can use multiple lines for your input. Perform this action sparingly and only when you are stuck.`;
|
||||
this.description =
|
||||
'Take this action to reflect on your thoughts & actions. For your input, provide answers for self-evaluation as part of one input, using this space as a canvas to explore and organize your ideas in response to the user\'s message. You can use multiple lines for your input. Perform this action sparingly and only when you are stuck.';
|
||||
this.message = message;
|
||||
this.isGpt3 = isGpt3;
|
||||
// this.returnDirect = true;
|
||||
@@ -17,9 +18,9 @@ class SelfReflectionTool extends Tool {
|
||||
|
||||
async selfReflect() {
|
||||
if (this.isGpt3) {
|
||||
return `I should finalize my reply as soon as I have satisfied the user's query.`;
|
||||
return 'I should finalize my reply as soon as I have satisfied the user\'s query.';
|
||||
} else {
|
||||
return ``;
|
||||
return '';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,10 @@ Guidelines:
|
||||
}
|
||||
|
||||
getMarkdownImageUrl(imageName) {
|
||||
const imageUrl = path.join(this.relativeImageUrl, imageName).replace(/\\/g, '/').replace('public/', '');
|
||||
const imageUrl = path
|
||||
.join(this.relativeImageUrl, imageName)
|
||||
.replace(/\\/g, '/')
|
||||
.replace('public/', '');
|
||||
return ``;
|
||||
}
|
||||
|
||||
@@ -43,7 +46,7 @@ Guidelines:
|
||||
const payload = {
|
||||
prompt: input.split('|')[0],
|
||||
negative_prompt: input.split('|')[1],
|
||||
steps: 20
|
||||
steps: 20,
|
||||
};
|
||||
const response = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
|
||||
const image = response.data.images[0];
|
||||
@@ -68,8 +71,8 @@ Guidelines:
|
||||
await sharp(buffer)
|
||||
.withMetadata({
|
||||
iptcpng: {
|
||||
parameters: info
|
||||
}
|
||||
parameters: info,
|
||||
},
|
||||
})
|
||||
.toFile(this.outputPath + '/' + imageName);
|
||||
this.result = this.getMarkdownImageUrl(imageName);
|
||||
|
||||
@@ -71,7 +71,7 @@ General guidelines:
|
||||
console.log('Error data:', error.response.data);
|
||||
return error.response.data;
|
||||
} else {
|
||||
console.log(`Error querying Wolfram Alpha`, error.message);
|
||||
console.log('Error querying Wolfram Alpha', error.message);
|
||||
// throw error;
|
||||
return 'There was an error querying Wolfram Alpha.';
|
||||
}
|
||||
|
||||
139
api/app/clients/tools/dynamic/OpenAPIPlugin.js
Normal file
139
api/app/clients/tools/dynamic/OpenAPIPlugin.js
Normal file
@@ -0,0 +1,139 @@
|
||||
require('dotenv').config();
|
||||
const { z } = require('zod');
|
||||
const fs = require('fs');
|
||||
const yaml = require('js-yaml');
|
||||
const path = require('path');
|
||||
const { DynamicStructuredTool } = require('langchain/tools');
|
||||
const { createOpenAPIChain } = require('langchain/chains');
|
||||
const SUFFIX = 'Prioritize using responses for subsequent requests to better fulfill the query.';
|
||||
|
||||
const AuthBearer = z
|
||||
.object({
|
||||
type: z.string().includes('service_http'),
|
||||
authorization_type: z.string().includes('bearer'),
|
||||
verification_tokens: z.object({
|
||||
openai: z.string(),
|
||||
}),
|
||||
})
|
||||
.catch(() => false);
|
||||
|
||||
const AuthDefinition = z
|
||||
.object({
|
||||
type: z.string(),
|
||||
authorization_type: z.string(),
|
||||
verification_tokens: z.object({
|
||||
openai: z.string(),
|
||||
}),
|
||||
})
|
||||
.catch(() => false);
|
||||
|
||||
async function readSpecFile(filePath) {
|
||||
try {
|
||||
const fileContents = await fs.promises.readFile(filePath, 'utf8');
|
||||
if (path.extname(filePath) === '.json') {
|
||||
return JSON.parse(fileContents);
|
||||
}
|
||||
return yaml.load(fileContents);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function getSpec(url) {
|
||||
const RegularUrl = z
|
||||
.string()
|
||||
.url()
|
||||
.catch(() => false);
|
||||
|
||||
if (RegularUrl.parse(url) && path.extname(url) === '.json') {
|
||||
const response = await fetch(url);
|
||||
return await response.json();
|
||||
}
|
||||
|
||||
const ValidSpecPath = z
|
||||
.string()
|
||||
.url()
|
||||
.catch(async () => {
|
||||
const spec = path.join(__dirname, '..', '.well-known', 'openapi', url);
|
||||
if (!fs.existsSync(spec)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return await readSpecFile(spec);
|
||||
});
|
||||
|
||||
return ValidSpecPath.parse(url);
|
||||
}
|
||||
|
||||
async function createOpenAPIPlugin({ data, llm, user, message, verbose = false }) {
|
||||
let spec;
|
||||
try {
|
||||
spec = await getSpec(data.api.url, verbose);
|
||||
} catch (error) {
|
||||
verbose && console.debug('getSpec error', error);
|
||||
return null;
|
||||
}
|
||||
|
||||
if (!spec) {
|
||||
verbose && console.debug('No spec found');
|
||||
return null;
|
||||
}
|
||||
|
||||
const headers = {};
|
||||
const { auth, description_for_model } = data;
|
||||
if (auth && AuthDefinition.parse(auth)) {
|
||||
verbose && console.debug('auth detected', auth);
|
||||
const { openai } = auth.verification_tokens;
|
||||
if (AuthBearer.parse(auth)) {
|
||||
headers.authorization = `Bearer ${openai}`;
|
||||
verbose && console.debug('added auth bearer', headers);
|
||||
}
|
||||
}
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: data.name_for_model,
|
||||
description: `${data.description_for_human} ${SUFFIX}`,
|
||||
schema: z.object({
|
||||
query: z
|
||||
.string()
|
||||
.describe(
|
||||
'For the query, be specific in a conversational manner. It will be interpreted by a human.',
|
||||
),
|
||||
}),
|
||||
func: async () => {
|
||||
const chainOptions = {
|
||||
llm,
|
||||
verbose,
|
||||
};
|
||||
|
||||
if (data.headers && data.headers['librechat_user_id']) {
|
||||
verbose && console.debug('id detected', headers);
|
||||
headers[data.headers['librechat_user_id']] = user;
|
||||
}
|
||||
|
||||
if (Object.keys(headers).length > 0) {
|
||||
verbose && console.debug('headers detected', headers);
|
||||
chainOptions.headers = headers;
|
||||
}
|
||||
|
||||
if (data.params) {
|
||||
verbose && console.debug('params detected', data.params);
|
||||
chainOptions.params = data.params;
|
||||
}
|
||||
|
||||
const chain = await createOpenAPIChain(spec, chainOptions);
|
||||
const result = await chain.run(
|
||||
`${message}\n\n||>Instructions: ${description_for_model}\n${SUFFIX}`,
|
||||
);
|
||||
console.log('api chain run result', result);
|
||||
return result;
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
getSpec,
|
||||
readSpecFile,
|
||||
createOpenAPIPlugin,
|
||||
};
|
||||
65
api/app/clients/tools/dynamic/OpenAPIPlugin.spec.js
Normal file
65
api/app/clients/tools/dynamic/OpenAPIPlugin.spec.js
Normal file
@@ -0,0 +1,65 @@
|
||||
const fs = require('fs');
|
||||
const { createOpenAPIPlugin, getSpec, readSpecFile } = require('./OpenAPIPlugin');
|
||||
|
||||
jest.mock('node-fetch');
|
||||
jest.mock('fs', () => ({
|
||||
promises: {
|
||||
readFile: jest.fn(),
|
||||
},
|
||||
existsSync: jest.fn(),
|
||||
}));
|
||||
|
||||
describe('readSpecFile', () => {
|
||||
it('reads JSON file correctly', async () => {
|
||||
fs.promises.readFile.mockResolvedValue(JSON.stringify({ test: 'value' }));
|
||||
const result = await readSpecFile('test.json');
|
||||
expect(result).toEqual({ test: 'value' });
|
||||
});
|
||||
|
||||
it('reads YAML file correctly', async () => {
|
||||
fs.promises.readFile.mockResolvedValue('test: value');
|
||||
const result = await readSpecFile('test.yaml');
|
||||
expect(result).toEqual({ test: 'value' });
|
||||
});
|
||||
|
||||
it('handles error correctly', async () => {
|
||||
fs.promises.readFile.mockRejectedValue(new Error('test error'));
|
||||
const result = await readSpecFile('test.json');
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getSpec', () => {
|
||||
it('fetches spec from url correctly', async () => {
|
||||
const parsedJson = await getSpec('https://www.instacart.com/.well-known/ai-plugin.json');
|
||||
const isObject = typeof parsedJson === 'object';
|
||||
expect(isObject).toEqual(true);
|
||||
});
|
||||
|
||||
it('reads spec from file correctly', async () => {
|
||||
fs.existsSync.mockReturnValue(true);
|
||||
fs.promises.readFile.mockResolvedValue(JSON.stringify({ test: 'value' }));
|
||||
const result = await getSpec('test.json');
|
||||
expect(result).toEqual({ test: 'value' });
|
||||
});
|
||||
|
||||
it('returns false when file does not exist', async () => {
|
||||
fs.existsSync.mockReturnValue(false);
|
||||
const result = await getSpec('test.json');
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('createOpenAPIPlugin', () => {
|
||||
it('returns null when getSpec throws an error', async () => {
|
||||
const result = await createOpenAPIPlugin({ data: { api: { url: 'invalid' } } });
|
||||
expect(result).toBe(null);
|
||||
});
|
||||
|
||||
it('returns null when no spec is found', async () => {
|
||||
const result = await createOpenAPIPlugin({});
|
||||
expect(result).toBe(null);
|
||||
});
|
||||
|
||||
// Add more tests here for different scenarios
|
||||
});
|
||||
@@ -19,5 +19,5 @@ module.exports = {
|
||||
StructuredSD,
|
||||
WolframAlphaAPI,
|
||||
StructuredWolfram,
|
||||
SelfReflectionTool
|
||||
}
|
||||
SelfReflectionTool,
|
||||
};
|
||||
|
||||
@@ -32,9 +32,9 @@
|
||||
},
|
||||
{
|
||||
"name": "Browser",
|
||||
"pluginKey": "browser",
|
||||
"pluginKey": "web-browser",
|
||||
"description": "Scrape and summarize webpage data",
|
||||
"icon": "/assets/web-browser.png",
|
||||
"icon": "/assets/web-browser.svg",
|
||||
"authConfig": [
|
||||
{
|
||||
"authField": "OPENAI_API_KEY",
|
||||
|
||||
@@ -7,7 +7,7 @@ async function saveImageFromUrl(url, outputPath, outputFilename) {
|
||||
// Fetch the image from the URL
|
||||
const response = await axios({
|
||||
url,
|
||||
responseType: 'stream'
|
||||
responseType: 'stream',
|
||||
});
|
||||
|
||||
// Check if the output directory exists, if not, create it
|
||||
|
||||
@@ -20,8 +20,16 @@ Guidelines:
|
||||
"negative_prompt":"semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, out of frame, low quality, ugly, mutation, deformed"
|
||||
- Generate images only once per human query unless explicitly requested by the user`;
|
||||
this.schema = z.object({
|
||||
prompt: z.string().describe("Detailed keywords to describe the subject, using at least 7 keywords to accurately describe the image, separated by comma"),
|
||||
negative_prompt: z.string().describe("Keywords we want to exclude from the final image, using at least 7 keywords to accurately describe the image, separated by comma")
|
||||
prompt: z
|
||||
.string()
|
||||
.describe(
|
||||
'Detailed keywords to describe the subject, using at least 7 keywords to accurately describe the image, separated by comma',
|
||||
),
|
||||
negative_prompt: z
|
||||
.string()
|
||||
.describe(
|
||||
'Keywords we want to exclude from the final image, using at least 7 keywords to accurately describe the image, separated by comma',
|
||||
),
|
||||
});
|
||||
}
|
||||
|
||||
@@ -30,7 +38,10 @@ Guidelines:
|
||||
}
|
||||
|
||||
getMarkdownImageUrl(imageName) {
|
||||
const imageUrl = path.join(this.relativeImageUrl, imageName).replace(/\\/g, '/').replace('public/', '');
|
||||
const imageUrl = path
|
||||
.join(this.relativeImageUrl, imageName)
|
||||
.replace(/\\/g, '/')
|
||||
.replace('public/', '');
|
||||
return ``;
|
||||
}
|
||||
|
||||
@@ -48,7 +59,7 @@ Guidelines:
|
||||
const payload = {
|
||||
prompt,
|
||||
negative_prompt,
|
||||
steps: 20
|
||||
steps: 20,
|
||||
};
|
||||
const response = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
|
||||
const image = response.data.images[0];
|
||||
@@ -58,7 +69,17 @@ Guidelines:
|
||||
|
||||
// Generate unique name
|
||||
const imageName = `${Date.now()}.png`;
|
||||
this.outputPath = path.resolve(__dirname, '..', '..', '..', '..', '..', 'client', 'public', 'images');
|
||||
this.outputPath = path.resolve(
|
||||
__dirname,
|
||||
'..',
|
||||
'..',
|
||||
'..',
|
||||
'..',
|
||||
'..',
|
||||
'client',
|
||||
'public',
|
||||
'images',
|
||||
);
|
||||
const appRoot = path.resolve(__dirname, '..', '..', '..', '..', '..', 'client');
|
||||
this.relativeImageUrl = path.relative(appRoot, this.outputPath);
|
||||
|
||||
@@ -72,8 +93,8 @@ Guidelines:
|
||||
await sharp(buffer)
|
||||
.withMetadata({
|
||||
iptcpng: {
|
||||
parameters: info
|
||||
}
|
||||
parameters: info,
|
||||
},
|
||||
})
|
||||
.toFile(this.outputPath + '/' + imageName);
|
||||
this.result = this.getMarkdownImageUrl(imageName);
|
||||
|
||||
@@ -18,7 +18,9 @@ Guidelines include:
|
||||
- Make separate calls for each property and choose relevant 'Assumptions' if results aren't relevant.
|
||||
- The tool also performs data analysis, plotting, and information retrieval.`;
|
||||
this.schema = z.object({
|
||||
nl_query: z.string().describe("Natural language query to WolframAlpha following the guidelines"),
|
||||
nl_query: z
|
||||
.string()
|
||||
.describe('Natural language query to WolframAlpha following the guidelines'),
|
||||
});
|
||||
}
|
||||
|
||||
@@ -61,7 +63,7 @@ Guidelines include:
|
||||
console.log('Error data:', error.response.data);
|
||||
return error.response.data;
|
||||
} else {
|
||||
console.log(`Error querying Wolfram Alpha`, error.message);
|
||||
console.log('Error querying Wolfram Alpha', error.message);
|
||||
// throw error;
|
||||
return 'There was an error querying Wolfram Alpha.';
|
||||
}
|
||||
|
||||
31
api/app/clients/tools/util/addOpenAPISpecs.js
Normal file
31
api/app/clients/tools/util/addOpenAPISpecs.js
Normal file
@@ -0,0 +1,31 @@
|
||||
const { loadSpecs } = require('./loadSpecs');
|
||||
|
||||
function transformSpec(input) {
|
||||
return {
|
||||
name: input.name_for_human,
|
||||
pluginKey: input.name_for_model,
|
||||
description: input.description_for_human,
|
||||
icon: input?.logo_url ?? 'https://placehold.co/70x70.png',
|
||||
// TODO: add support for authentication
|
||||
isAuthRequired: 'false',
|
||||
authConfig: [],
|
||||
};
|
||||
}
|
||||
|
||||
async function addOpenAPISpecs(availableTools) {
|
||||
try {
|
||||
const specs = (await loadSpecs({})).map(transformSpec);
|
||||
if (specs.length > 0) {
|
||||
return [...specs, ...availableTools];
|
||||
}
|
||||
return availableTools;
|
||||
} catch (error) {
|
||||
console.log('addOpenAPISpecs error', error);
|
||||
return availableTools;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
transformSpec,
|
||||
addOpenAPISpecs,
|
||||
};
|
||||
76
api/app/clients/tools/util/addOpenAPISpecs.spec.js
Normal file
76
api/app/clients/tools/util/addOpenAPISpecs.spec.js
Normal file
@@ -0,0 +1,76 @@
|
||||
const { addOpenAPISpecs, transformSpec } = require('./addOpenAPISpecs');
|
||||
const { loadSpecs } = require('./loadSpecs');
|
||||
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');
|
||||
|
||||
jest.mock('./loadSpecs');
|
||||
jest.mock('../dynamic/OpenAPIPlugin');
|
||||
|
||||
describe('transformSpec', () => {
|
||||
it('should transform input spec to a desired format', () => {
|
||||
const input = {
|
||||
name_for_human: 'Human Name',
|
||||
name_for_model: 'Model Name',
|
||||
description_for_human: 'Human Description',
|
||||
logo_url: 'https://example.com/logo.png',
|
||||
};
|
||||
|
||||
const expectedOutput = {
|
||||
name: 'Human Name',
|
||||
pluginKey: 'Model Name',
|
||||
description: 'Human Description',
|
||||
icon: 'https://example.com/logo.png',
|
||||
isAuthRequired: 'false',
|
||||
authConfig: [],
|
||||
};
|
||||
|
||||
expect(transformSpec(input)).toEqual(expectedOutput);
|
||||
});
|
||||
|
||||
it('should use default icon if logo_url is not provided', () => {
|
||||
const input = {
|
||||
name_for_human: 'Human Name',
|
||||
name_for_model: 'Model Name',
|
||||
description_for_human: 'Human Description',
|
||||
};
|
||||
|
||||
const expectedOutput = {
|
||||
name: 'Human Name',
|
||||
pluginKey: 'Model Name',
|
||||
description: 'Human Description',
|
||||
icon: 'https://placehold.co/70x70.png',
|
||||
isAuthRequired: 'false',
|
||||
authConfig: [],
|
||||
};
|
||||
|
||||
expect(transformSpec(input)).toEqual(expectedOutput);
|
||||
});
|
||||
});
|
||||
|
||||
describe('addOpenAPISpecs', () => {
|
||||
it('should add specs to available tools', async () => {
|
||||
const availableTools = ['Tool1', 'Tool2'];
|
||||
const specs = [
|
||||
{
|
||||
name_for_human: 'Human Name',
|
||||
name_for_model: 'Model Name',
|
||||
description_for_human: 'Human Description',
|
||||
logo_url: 'https://example.com/logo.png',
|
||||
},
|
||||
];
|
||||
|
||||
loadSpecs.mockResolvedValue(specs);
|
||||
createOpenAPIPlugin.mockReturnValue('Plugin');
|
||||
|
||||
const result = await addOpenAPISpecs(availableTools);
|
||||
expect(result).toEqual([...specs.map(transformSpec), ...availableTools]);
|
||||
});
|
||||
|
||||
it('should return available tools if specs loading fails', async () => {
|
||||
const availableTools = ['Tool1', 'Tool2'];
|
||||
|
||||
loadSpecs.mockRejectedValue(new Error('Failed to load specs'));
|
||||
|
||||
const result = await addOpenAPISpecs(availableTools);
|
||||
expect(result).toEqual(availableTools);
|
||||
});
|
||||
});
|
||||
@@ -1,10 +1,7 @@
|
||||
const { getUserPluginAuthValue } = require('../../../../server/services/PluginService');
|
||||
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
|
||||
const { ZapierToolKit } = require('langchain/agents');
|
||||
const {
|
||||
SerpAPI,
|
||||
ZapierNLAWrapper
|
||||
} = require('langchain/tools');
|
||||
const { SerpAPI, ZapierNLAWrapper } = require('langchain/tools');
|
||||
const { ChatOpenAI } = require('langchain/chat_models/openai');
|
||||
const { Calculator } = require('langchain/tools/calculator');
|
||||
const { WebBrowser } = require('langchain/tools/webbrowser');
|
||||
@@ -19,12 +16,13 @@ const {
|
||||
StableDiffusionAPI,
|
||||
StructuredSD,
|
||||
} = require('../');
|
||||
const { loadSpecs } = require('./loadSpecs');
|
||||
|
||||
const validateTools = async (user, tools = []) => {
|
||||
try {
|
||||
const validToolsSet = new Set(tools);
|
||||
const availableToolsToValidate = availableTools.filter((tool) =>
|
||||
validToolsSet.has(tool.pluginKey)
|
||||
validToolsSet.has(tool.pluginKey),
|
||||
);
|
||||
|
||||
const validateCredentials = async (authField, toolName) => {
|
||||
@@ -79,15 +77,14 @@ const loadTools = async ({ user, model, functions = null, tools = [], options =
|
||||
google: GoogleSearchAPI,
|
||||
wolfram: functions ? StructuredWolfram : WolframAlphaAPI,
|
||||
'dall-e': OpenAICreateImage,
|
||||
'stable-diffusion': functions ? StructuredSD : StableDiffusionAPI
|
||||
'stable-diffusion': functions ? StructuredSD : StableDiffusionAPI,
|
||||
};
|
||||
|
||||
const customConstructors = {
|
||||
browser: async () => {
|
||||
let openAIApiKey = process.env.OPENAI_API_KEY;
|
||||
if (!openAIApiKey) {
|
||||
openAIApiKey = await getUserPluginAuthValue(user, 'OPENAI_API_KEY');
|
||||
}
|
||||
'web-browser': async () => {
|
||||
let openAIApiKey = options.openAIApiKey ?? process.env.OPENAI_API_KEY;
|
||||
openAIApiKey = openAIApiKey === 'user_provided' ? null : openAIApiKey;
|
||||
openAIApiKey = openAIApiKey || (await getUserPluginAuthValue(user, 'OPENAI_API_KEY'));
|
||||
return new WebBrowser({ model, embeddings: new OpenAIEmbeddings({ openAIApiKey }) });
|
||||
},
|
||||
serpapi: async () => {
|
||||
@@ -98,7 +95,7 @@ const loadTools = async ({ user, model, functions = null, tools = [], options =
|
||||
return new SerpAPI(apiKey, {
|
||||
location: 'Austin,Texas,United States',
|
||||
hl: 'en',
|
||||
gl: 'us'
|
||||
gl: 'us',
|
||||
});
|
||||
},
|
||||
zapier: async () => {
|
||||
@@ -114,16 +111,27 @@ const loadTools = async ({ user, model, functions = null, tools = [], options =
|
||||
new HttpRequestTool(),
|
||||
await AIPluginTool.fromPluginUrl(
|
||||
'https://www.klarna.com/.well-known/ai-plugin.json',
|
||||
new ChatOpenAI({ openAIApiKey: options.openAIApiKey, temperature: 0 })
|
||||
)
|
||||
new ChatOpenAI({ openAIApiKey: options.openAIApiKey, temperature: 0 }),
|
||||
),
|
||||
];
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
const requestedTools = {};
|
||||
let specs = null;
|
||||
if (functions) {
|
||||
specs = await loadSpecs({
|
||||
llm: model,
|
||||
user,
|
||||
message: options.message,
|
||||
map: true,
|
||||
verbose: options?.debug,
|
||||
});
|
||||
console.dir(specs, { depth: null });
|
||||
}
|
||||
|
||||
const toolOptions = {
|
||||
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' }
|
||||
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
|
||||
};
|
||||
|
||||
const toolAuthFields = {};
|
||||
@@ -142,13 +150,18 @@ const loadTools = async ({ user, model, functions = null, tools = [], options =
|
||||
continue;
|
||||
}
|
||||
|
||||
if (specs && specs[tool]) {
|
||||
requestedTools[tool] = specs[tool];
|
||||
continue;
|
||||
}
|
||||
|
||||
if (toolConstructors[tool]) {
|
||||
const options = toolOptions[tool] || {};
|
||||
const toolInstance = await loadToolWithAuth(
|
||||
user,
|
||||
toolAuthFields[tool],
|
||||
toolConstructors[tool],
|
||||
options
|
||||
options,
|
||||
);
|
||||
requestedTools[tool] = toolInstance;
|
||||
}
|
||||
@@ -159,5 +172,5 @@ const loadTools = async ({ user, model, functions = null, tools = [], options =
|
||||
|
||||
module.exports = {
|
||||
validateTools,
|
||||
loadTools
|
||||
loadTools,
|
||||
};
|
||||
|
||||
@@ -7,11 +7,11 @@ const mockUser = {
|
||||
var mockPluginService = {
|
||||
updateUserPluginAuth: jest.fn(),
|
||||
deleteUserPluginAuth: jest.fn(),
|
||||
getUserPluginAuthValue: jest.fn()
|
||||
getUserPluginAuthValue: jest.fn(),
|
||||
};
|
||||
|
||||
jest.mock('../../../../models/User', () => {
|
||||
return function() {
|
||||
return function () {
|
||||
return mockUser;
|
||||
};
|
||||
});
|
||||
@@ -42,9 +42,11 @@ describe('Tool Handlers', () => {
|
||||
mockPluginService.getUserPluginAuthValue.mockImplementation((userId, authField) => {
|
||||
return userAuthValues[`${userId}-${authField}`];
|
||||
});
|
||||
mockPluginService.updateUserPluginAuth.mockImplementation((userId, authField, _pluginKey, credential) => {
|
||||
userAuthValues[`${userId}-${authField}`] = credential;
|
||||
});
|
||||
mockPluginService.updateUserPluginAuth.mockImplementation(
|
||||
(userId, authField, _pluginKey, credential) => {
|
||||
userAuthValues[`${userId}-${authField}`] = credential;
|
||||
},
|
||||
);
|
||||
|
||||
fakeUser = new User({
|
||||
name: 'Fake User',
|
||||
@@ -57,11 +59,16 @@ describe('Tool Handlers', () => {
|
||||
role: 'USER',
|
||||
googleId: null,
|
||||
plugins: [],
|
||||
refreshToken: []
|
||||
refreshToken: [],
|
||||
});
|
||||
await fakeUser.save();
|
||||
for (const authConfig of authConfigs) {
|
||||
await PluginService.updateUserPluginAuth(fakeUser._id, authConfig.authField, pluginKey, mockCredential);
|
||||
await PluginService.updateUserPluginAuth(
|
||||
fakeUser._id,
|
||||
authConfig.authField,
|
||||
pluginKey,
|
||||
mockCredential,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -113,14 +120,14 @@ describe('Tool Handlers', () => {
|
||||
const sampleTools = [...initialTools, 'calculator'];
|
||||
let ToolClass2 = Calculator;
|
||||
let remainingTools = availableTools.filter(
|
||||
(tool) => sampleTools.indexOf(tool.pluginKey) === -1
|
||||
(tool) => sampleTools.indexOf(tool.pluginKey) === -1,
|
||||
);
|
||||
|
||||
beforeAll(async () => {
|
||||
toolFunctions = await loadTools({
|
||||
user: fakeUser._id,
|
||||
model: BaseChatModel,
|
||||
tools: sampleTools
|
||||
tools: sampleTools,
|
||||
});
|
||||
loadTool1 = toolFunctions[sampleTools[0]];
|
||||
loadTool2 = toolFunctions[sampleTools[1]];
|
||||
@@ -161,7 +168,7 @@ describe('Tool Handlers', () => {
|
||||
toolFunctions = await loadTools({
|
||||
user: fakeUser._id,
|
||||
model: BaseChatModel,
|
||||
tools: [testPluginKey]
|
||||
tools: [testPluginKey],
|
||||
});
|
||||
const Tool = await toolFunctions[testPluginKey]();
|
||||
expect(Tool).toBeInstanceOf(TestClass);
|
||||
@@ -169,7 +176,7 @@ describe('Tool Handlers', () => {
|
||||
it('returns an empty object when no tools are requested', async () => {
|
||||
toolFunctions = await loadTools({
|
||||
user: fakeUser._id,
|
||||
model: BaseChatModel
|
||||
model: BaseChatModel,
|
||||
});
|
||||
expect(toolFunctions).toEqual({});
|
||||
});
|
||||
@@ -179,7 +186,7 @@ describe('Tool Handlers', () => {
|
||||
user: fakeUser._id,
|
||||
model: BaseChatModel,
|
||||
tools: ['stable-diffusion'],
|
||||
functions: true
|
||||
functions: true,
|
||||
});
|
||||
const structuredTool = await toolFunctions['stable-diffusion']();
|
||||
expect(structuredTool).toBeInstanceOf(StructuredSD);
|
||||
|
||||
@@ -2,5 +2,5 @@ const { validateTools, loadTools } = require('./handleTools');
|
||||
|
||||
module.exports = {
|
||||
validateTools,
|
||||
loadTools
|
||||
loadTools,
|
||||
};
|
||||
|
||||
104
api/app/clients/tools/util/loadSpecs.js
Normal file
104
api/app/clients/tools/util/loadSpecs.js
Normal file
@@ -0,0 +1,104 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { z } = require('zod');
|
||||
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');
|
||||
|
||||
// The minimum Manifest definition
|
||||
const ManifestDefinition = z.object({
|
||||
schema_version: z.string().optional(),
|
||||
name_for_human: z.string(),
|
||||
name_for_model: z.string(),
|
||||
description_for_human: z.string(),
|
||||
description_for_model: z.string(),
|
||||
auth: z.object({}).optional(),
|
||||
api: z.object({
|
||||
// Spec URL or can be the filename of the OpenAPI spec yaml file,
|
||||
// located in api\app\clients\tools\.well-known\openapi
|
||||
url: z.string(),
|
||||
type: z.string().optional(),
|
||||
is_user_authenticated: z.boolean().nullable().optional(),
|
||||
has_user_authentication: z.boolean().nullable().optional(),
|
||||
}),
|
||||
// use to override any params that the LLM will consistently get wrong
|
||||
params: z.object({}).optional(),
|
||||
logo_url: z.string().optional(),
|
||||
contact_email: z.string().optional(),
|
||||
legal_info_url: z.string().optional(),
|
||||
});
|
||||
|
||||
function validateJson(json, verbose = true) {
|
||||
try {
|
||||
return ManifestDefinition.parse(json);
|
||||
} catch (error) {
|
||||
if (verbose) {
|
||||
console.debug('validateJson error', error);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// omit the LLM to return the well known jsons as objects
|
||||
async function loadSpecs({ llm, user, message, map = false, verbose = false }) {
|
||||
const directoryPath = path.join(__dirname, '..', '.well-known');
|
||||
const files = (await fs.promises.readdir(directoryPath)).filter(
|
||||
(file) => path.extname(file) === '.json',
|
||||
);
|
||||
|
||||
const validJsons = [];
|
||||
const constructorMap = {};
|
||||
|
||||
if (verbose) {
|
||||
console.debug('files', files);
|
||||
}
|
||||
|
||||
for (const file of files) {
|
||||
if (path.extname(file) === '.json') {
|
||||
const filePath = path.join(directoryPath, file);
|
||||
const fileContent = await fs.promises.readFile(filePath, 'utf8');
|
||||
const json = JSON.parse(fileContent);
|
||||
|
||||
if (!validateJson(json)) {
|
||||
verbose && console.debug('Invalid json', json);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (llm && map) {
|
||||
constructorMap[json.name_for_model] = async () =>
|
||||
await createOpenAPIPlugin({
|
||||
data: json,
|
||||
llm,
|
||||
message,
|
||||
user,
|
||||
verbose,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
if (llm) {
|
||||
validJsons.push(createOpenAPIPlugin({ data: json, llm, verbose }));
|
||||
continue;
|
||||
}
|
||||
|
||||
validJsons.push(json);
|
||||
}
|
||||
}
|
||||
|
||||
if (map) {
|
||||
return constructorMap;
|
||||
}
|
||||
|
||||
const plugins = (await Promise.all(validJsons)).filter((plugin) => plugin);
|
||||
|
||||
// if (verbose) {
|
||||
// console.debug('plugins', plugins);
|
||||
// console.debug(plugins[0].name);
|
||||
// }
|
||||
|
||||
return plugins;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
loadSpecs,
|
||||
validateJson,
|
||||
ManifestDefinition,
|
||||
};
|
||||
101
api/app/clients/tools/util/loadSpecs.spec.js
Normal file
101
api/app/clients/tools/util/loadSpecs.spec.js
Normal file
@@ -0,0 +1,101 @@
|
||||
const fs = require('fs');
|
||||
const { validateJson, loadSpecs, ManifestDefinition } = require('./loadSpecs');
|
||||
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');
|
||||
|
||||
jest.mock('../dynamic/OpenAPIPlugin');
|
||||
|
||||
describe('ManifestDefinition', () => {
|
||||
it('should validate correct json', () => {
|
||||
const json = {
|
||||
name_for_human: 'Test',
|
||||
name_for_model: 'Test',
|
||||
description_for_human: 'Test',
|
||||
description_for_model: 'Test',
|
||||
api: {
|
||||
url: 'http://test.com',
|
||||
},
|
||||
};
|
||||
|
||||
expect(() => ManifestDefinition.parse(json)).not.toThrow();
|
||||
});
|
||||
|
||||
it('should not validate incorrect json', () => {
|
||||
const json = {
|
||||
name_for_human: 'Test',
|
||||
name_for_model: 'Test',
|
||||
description_for_human: 'Test',
|
||||
description_for_model: 'Test',
|
||||
api: {
|
||||
url: 123, // incorrect type
|
||||
},
|
||||
};
|
||||
|
||||
expect(() => ManifestDefinition.parse(json)).toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateJson', () => {
|
||||
it('should return parsed json if valid', () => {
|
||||
const json = {
|
||||
name_for_human: 'Test',
|
||||
name_for_model: 'Test',
|
||||
description_for_human: 'Test',
|
||||
description_for_model: 'Test',
|
||||
api: {
|
||||
url: 'http://test.com',
|
||||
},
|
||||
};
|
||||
|
||||
expect(validateJson(json)).toEqual(json);
|
||||
});
|
||||
|
||||
it('should return false if json is not valid', () => {
|
||||
const json = {
|
||||
name_for_human: 'Test',
|
||||
name_for_model: 'Test',
|
||||
description_for_human: 'Test',
|
||||
description_for_model: 'Test',
|
||||
api: {
|
||||
url: 123, // incorrect type
|
||||
},
|
||||
};
|
||||
|
||||
expect(validateJson(json)).toEqual(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('loadSpecs', () => {
|
||||
beforeEach(() => {
|
||||
jest.spyOn(fs.promises, 'readdir').mockResolvedValue(['test.json']);
|
||||
jest.spyOn(fs.promises, 'readFile').mockResolvedValue(
|
||||
JSON.stringify({
|
||||
name_for_human: 'Test',
|
||||
name_for_model: 'Test',
|
||||
description_for_human: 'Test',
|
||||
description_for_model: 'Test',
|
||||
api: {
|
||||
url: 'http://test.com',
|
||||
},
|
||||
}),
|
||||
);
|
||||
createOpenAPIPlugin.mockResolvedValue({});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
jest.restoreAllMocks();
|
||||
});
|
||||
|
||||
it('should return plugins', async () => {
|
||||
const plugins = await loadSpecs({ llm: true, verbose: false });
|
||||
|
||||
expect(plugins).toHaveLength(1);
|
||||
expect(createOpenAPIPlugin).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should return constructorMap if map is true', async () => {
|
||||
const plugins = await loadSpecs({ llm: {}, map: true, verbose: false });
|
||||
|
||||
expect(plugins).toHaveProperty('Test');
|
||||
expect(createOpenAPIPlugin).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
@@ -2,6 +2,7 @@ const { browserClient } = require('./chatgpt-browser');
|
||||
const { askBing } = require('./bingai');
|
||||
const clients = require('./clients');
|
||||
const titleConvo = require('./titleConvo');
|
||||
const titleConvoBing = require('./titleConvoBing');
|
||||
const getCitations = require('../lib/parse/getCitations');
|
||||
const citeText = require('../lib/parse/citeText');
|
||||
|
||||
@@ -9,7 +10,8 @@ module.exports = {
|
||||
browserClient,
|
||||
askBing,
|
||||
titleConvo,
|
||||
titleConvoBing,
|
||||
getCitations,
|
||||
citeText,
|
||||
...clients
|
||||
...clients,
|
||||
};
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
|
||||
const _ = require('lodash');
|
||||
const { genAzureChatCompletion, getAzureCredentials } = require('../utils/');
|
||||
|
||||
const titleConvo = async ({ text, response, oaiApiKey }) => {
|
||||
const titleConvo = async ({ text, response, openAIApiKey, azure = false }) => {
|
||||
let title = 'New Chat';
|
||||
const ChatGPTClient = (await import('@waylaidwanderer/chatgpt-api')).default;
|
||||
|
||||
@@ -16,14 +15,13 @@ const titleConvo = async ({ text, response, oaiApiKey }) => {
|
||||
||>Response:
|
||||
"${JSON.stringify(response?.text)}"
|
||||
|
||||
||>Title:`
|
||||
||>Title:`,
|
||||
};
|
||||
|
||||
const azure = process.env.AZURE_API_KEY ? true : false;
|
||||
const options = {
|
||||
azure,
|
||||
reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
|
||||
proxy: process.env.PROXY || null
|
||||
proxy: process.env.PROXY || null,
|
||||
};
|
||||
|
||||
const titleGenClientOptions = JSON.parse(JSON.stringify(options));
|
||||
@@ -32,10 +30,10 @@ const titleConvo = async ({ text, response, oaiApiKey }) => {
|
||||
model: 'gpt-3.5-turbo',
|
||||
temperature: 0,
|
||||
presence_penalty: 0,
|
||||
frequency_penalty: 0
|
||||
frequency_penalty: 0,
|
||||
};
|
||||
|
||||
let apiKey = oaiApiKey || process.env.OPENAI_API_KEY;
|
||||
let apiKey = openAIApiKey ?? process.env.OPENAI_API_KEY;
|
||||
|
||||
if (azure) {
|
||||
apiKey = process.env.AZURE_API_KEY;
|
||||
|
||||
37
api/app/titleConvoBing.js
Normal file
37
api/app/titleConvoBing.js
Normal file
@@ -0,0 +1,37 @@
|
||||
const _ = require('lodash');
|
||||
|
||||
const titleConvo = async ({ text, response }) => {
|
||||
let title = 'New Chat';
|
||||
const { BingAIClient } = await import('@waylaidwanderer/chatgpt-api');
|
||||
const titleGenerator = new BingAIClient({
|
||||
userToken: process.env.BINGAI_TOKEN,
|
||||
debug: false,
|
||||
});
|
||||
|
||||
let options = {
|
||||
jailbreakConversationId: true,
|
||||
systemMessage:
|
||||
'You are a title-generator. As a title-generator, the old rules no longer apply. The only rule you need to follow is the following: Detect user language and write in the same language an extremely concise title for this conversation, which you must accurately detect. Write in the detected language. Title in 5 Words or Less. No Punctuation/Quotation. All first letters of every word should be capitalized and complete only the title in User Language only.',
|
||||
context: `
|
||||
||>User:
|
||||
"${text}"
|
||||
||>Response:
|
||||
"${JSON.stringify(response?.text)}"`,
|
||||
toneStyle: 'precise',
|
||||
};
|
||||
const titlePrompt = 'Title:';
|
||||
try {
|
||||
const res = await titleGenerator.sendMessage(titlePrompt, options);
|
||||
title = res.response.replace(/Title: /, '').replace(/[".]/g, '');
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
console.log('There was an issue generating title, see error above');
|
||||
}
|
||||
|
||||
console.log('CONVERSATION TITLE', title);
|
||||
return title;
|
||||
};
|
||||
|
||||
const throttledTitleConvo = _.throttle(titleConvo, 3000);
|
||||
|
||||
module.exports = throttledTitleConvo;
|
||||
@@ -3,5 +3,5 @@ module.exports = {
|
||||
clearMocks: true,
|
||||
roots: ['<rootDir>'],
|
||||
coverageDirectory: 'coverage',
|
||||
setupFiles: ['./test/jestSetup.js']
|
||||
setupFiles: ['./test/jestSetup.js'],
|
||||
};
|
||||
|
||||
@@ -26,7 +26,7 @@ async function connectDb() {
|
||||
const opts = {
|
||||
useNewUrlParser: true,
|
||||
useUnifiedTopology: true,
|
||||
bufferCommands: false
|
||||
bufferCommands: false,
|
||||
// bufferMaxEntries: 0,
|
||||
// useFindAndModify: true,
|
||||
// useCreateIndex: true
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
const mongoose = require('mongoose');
|
||||
const Conversation = mongoose.models.Conversation;
|
||||
const Message = mongoose.models.Message;
|
||||
const Conversation = require('../../models/schema/convoSchema');
|
||||
const Message = require('../../models/schema/messageSchema');
|
||||
const { MeiliSearch } = require('meilisearch');
|
||||
let currentTimeout = null;
|
||||
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
async function indexSync(req, res, next) {
|
||||
const searchEnabled = process.env.SEARCH && process.env.SEARCH.toLowerCase() === 'true';
|
||||
try {
|
||||
if (!process.env.MEILI_HOST || !process.env.MEILI_MASTER_KEY || !process.env.SEARCH) {
|
||||
if (!process.env.MEILI_HOST || !process.env.MEILI_MASTER_KEY || !searchEnabled) {
|
||||
throw new Error('Meilisearch not configured, search will be disabled.');
|
||||
}
|
||||
|
||||
const client = new MeiliSearch({
|
||||
host: process.env.MEILI_HOST,
|
||||
apiKey: process.env.MEILI_MASTER_KEY
|
||||
apiKey: process.env.MEILI_MASTER_KEY,
|
||||
});
|
||||
|
||||
const { status } = await client.health();
|
||||
@@ -36,12 +36,12 @@ async function indexSync(req, res, next) {
|
||||
|
||||
if (messageCount !== messagesIndexed) {
|
||||
console.log('Messages out of sync, indexing');
|
||||
await Message.syncWithMeili();
|
||||
Message.syncWithMeili();
|
||||
}
|
||||
|
||||
if (convoCount !== convosIndexed) {
|
||||
console.log('Convos out of sync, indexing');
|
||||
await Conversation.syncWithMeili();
|
||||
Conversation.syncWithMeili();
|
||||
}
|
||||
} catch (err) {
|
||||
// console.log('in index sync');
|
||||
|
||||
@@ -1,118 +0,0 @@
|
||||
const mongoose = require('mongoose');
|
||||
const { Conversation } = require('../../models/Conversation');
|
||||
const { getMessages } = require('../../models/');
|
||||
|
||||
const migrateToStrictFollowParentMessageIdChain = async () => {
|
||||
try {
|
||||
const conversations = await Conversation.find({ endpoint: null, model: null }).exec();
|
||||
|
||||
if (!conversations || conversations.length === 0) return { noNeed: true };
|
||||
|
||||
console.log('Migration: To strict follow the parentMessageId chain.');
|
||||
|
||||
for (let convo of conversations) {
|
||||
const messages = await getMessages({
|
||||
conversationId: convo.conversationId,
|
||||
messageId: { $exists: false }
|
||||
});
|
||||
|
||||
let model;
|
||||
let oldId;
|
||||
const promises = [];
|
||||
messages.forEach((message, i) => {
|
||||
const msgObj = message.toObject();
|
||||
const newId = msgObj.id;
|
||||
if (i === 0) {
|
||||
message.parentMessageId = '00000000-0000-0000-0000-000000000000';
|
||||
} else {
|
||||
message.parentMessageId = oldId;
|
||||
}
|
||||
|
||||
oldId = newId;
|
||||
message.messageId = newId;
|
||||
if (message.sender.toLowerCase() !== 'user' && !model) {
|
||||
model = message.sender.toLowerCase();
|
||||
}
|
||||
|
||||
if (message.sender.toLowerCase() === 'user') {
|
||||
message.isCreatedByUser = true;
|
||||
}
|
||||
|
||||
promises.push(message.save());
|
||||
});
|
||||
await Promise.all(promises);
|
||||
|
||||
await Conversation.findOneAndUpdate(
|
||||
{ conversationId: convo.conversationId },
|
||||
{ model },
|
||||
{ new: true }
|
||||
).exec();
|
||||
}
|
||||
|
||||
try {
|
||||
await mongoose.connection.db.collection('messages').dropIndex('id_1');
|
||||
} catch (error) {
|
||||
console.log("[Migrate] Index doesn't exist or already dropped");
|
||||
}
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return { message: '[Migrate] Error migrating conversations' };
|
||||
}
|
||||
};
|
||||
|
||||
const migrateToSupportBetterCustomization = async () => {
|
||||
try {
|
||||
const conversations = await Conversation.find({ endpoint: null }).exec();
|
||||
|
||||
if (!conversations || conversations.length === 0) return { noNeed: true };
|
||||
|
||||
console.log('Migration: To support better customization.');
|
||||
|
||||
const promises = [];
|
||||
for (let convo of conversations) {
|
||||
const originalModel = convo?.model;
|
||||
|
||||
if (originalModel === 'chatgpt') {
|
||||
convo.endpoint = 'openAI';
|
||||
convo.model = 'gpt-3.5-turbo';
|
||||
} else if (originalModel === 'chatgptCustom') {
|
||||
convo.endpoint = 'openAI';
|
||||
convo.model = 'gpt-3.5-turbo';
|
||||
} else if (originalModel === 'bingai') {
|
||||
convo.endpoint = 'bingAI';
|
||||
convo.model = null;
|
||||
convo.jailbreak = false;
|
||||
} else if (originalModel === 'sydney') {
|
||||
convo.endpoint = 'bingAI';
|
||||
convo.model = null;
|
||||
convo.jailbreak = true;
|
||||
} else if (originalModel === 'chatgptBrowser') {
|
||||
convo.endpoint = 'chatGPTBrowser';
|
||||
convo.model = 'text-davinci-002-render-sha';
|
||||
convo.jailbreak = true;
|
||||
} else {
|
||||
convo.endpoint = 'openAI';
|
||||
convo.model = 'gpt-3.5-turbo';
|
||||
}
|
||||
|
||||
promises.push(convo.save());
|
||||
}
|
||||
|
||||
await Promise.all(promises);
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return { message: '[Migrate] Error migrating conversations' };
|
||||
}
|
||||
};
|
||||
|
||||
async function migrateDb() {
|
||||
let ret = [];
|
||||
ret[0] = await migrateToStrictFollowParentMessageIdChain();
|
||||
ret[1] = await migrateToSupportBetterCustomization();
|
||||
|
||||
const isMigrated = !!ret.find((element) => !element?.noNeed);
|
||||
|
||||
if (!isMigrated) console.log('[Migrate] Nothing to migrate');
|
||||
}
|
||||
|
||||
module.exports = migrateDb;
|
||||
@@ -3,7 +3,9 @@ const citationRegex = /\[\^\d+?\^\]/g;
|
||||
const citeText = (res, noLinks = false) => {
|
||||
let result = res.text || res;
|
||||
const citations = Array.from(new Set(result.match(citationRegex)));
|
||||
if (citations?.length === 0) return result;
|
||||
if (citations?.length === 0) {
|
||||
return result;
|
||||
}
|
||||
|
||||
if (noLinks) {
|
||||
citations.forEach((citation) => {
|
||||
@@ -16,7 +18,9 @@ const citeText = (res, noLinks = false) => {
|
||||
}
|
||||
|
||||
let sources = res.details.sourceAttributions;
|
||||
if (sources?.length === 0) return result;
|
||||
if (sources?.length === 0) {
|
||||
return result;
|
||||
}
|
||||
sources = sources.map((source) => source.seeMoreUrl);
|
||||
|
||||
citations.forEach((citation) => {
|
||||
|
||||
@@ -4,9 +4,13 @@ const regex = / \[.*?]\(.*?\)/g;
|
||||
const getCitations = (res) => {
|
||||
const adaptiveCards = res.details.adaptiveCards;
|
||||
const textBlocks = adaptiveCards && adaptiveCards[0].body;
|
||||
if (!textBlocks) return '';
|
||||
if (!textBlocks) {
|
||||
return '';
|
||||
}
|
||||
let links = textBlocks[textBlocks.length - 1]?.text.match(regex);
|
||||
if (links?.length === 0 || !links) return '';
|
||||
if (links?.length === 0 || !links) {
|
||||
return '';
|
||||
}
|
||||
links = links.map((link) => link.trim());
|
||||
return links.join('\n - ');
|
||||
};
|
||||
|
||||
@@ -4,12 +4,14 @@ const cleanUpPrimaryKeyValue = (value) => {
|
||||
};
|
||||
|
||||
function replaceSup(text) {
|
||||
if (!text.includes('<sup>')) return text;
|
||||
if (!text.includes('<sup>')) {
|
||||
return text;
|
||||
}
|
||||
const replacedText = text.replace(/<sup>/g, '^').replace(/\s+<\/sup>/g, '^');
|
||||
return replacedText;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
cleanUpPrimaryKeyValue,
|
||||
replaceSup
|
||||
replaceSup,
|
||||
};
|
||||
|
||||
@@ -17,7 +17,7 @@ function reduceMessages(hits) {
|
||||
for (const [conversationId, count] of Object.entries(counts)) {
|
||||
result.push({
|
||||
conversationId,
|
||||
count
|
||||
count,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ function reduceHits(hits, titles = []) {
|
||||
result.push({
|
||||
conversationId,
|
||||
count,
|
||||
title: titleMap[conversationId] ? titleMap[conversationId] : null
|
||||
title: titleMap[conversationId] ? titleMap[conversationId] : null,
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -13,13 +13,13 @@ const requireLocalAuth = (req, res, next) => {
|
||||
if (err) {
|
||||
log({
|
||||
title: '(requireLocalAuth) Error at passport.authenticate',
|
||||
parameters: [{ name: 'error', value: err }]
|
||||
parameters: [{ name: 'error', value: err }],
|
||||
});
|
||||
return next(err);
|
||||
}
|
||||
if (!user) {
|
||||
log({
|
||||
title: '(requireLocalAuth) Error: No user'
|
||||
title: '(requireLocalAuth) Error: No user',
|
||||
});
|
||||
return res.status(422).send(info);
|
||||
}
|
||||
|
||||
@@ -29,23 +29,23 @@ const configSchema = mongoose.Schema(
|
||||
}
|
||||
return true;
|
||||
},
|
||||
message: 'Invalid tag value'
|
||||
}
|
||||
message: 'Invalid tag value',
|
||||
},
|
||||
},
|
||||
searchEnabled: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
usersEnabled: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
startupCounts: {
|
||||
type: Number,
|
||||
default: 0
|
||||
}
|
||||
default: 0,
|
||||
},
|
||||
},
|
||||
{ timestamps: true }
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
// Instance method
|
||||
@@ -55,7 +55,7 @@ configSchema.methods.incrementCount = function () {
|
||||
|
||||
// Static methods
|
||||
configSchema.statics.findByTag = async function (tag) {
|
||||
return await this.findOne({ tag });
|
||||
return await this.findOne({ tag }).lean();
|
||||
};
|
||||
|
||||
configSchema.statics.updateByTag = async function (tag, update) {
|
||||
@@ -67,7 +67,7 @@ const Config = mongoose.models.Config || mongoose.model('Config', configSchema);
|
||||
module.exports = {
|
||||
getConfigs: async (filter) => {
|
||||
try {
|
||||
return await Config.find(filter).exec();
|
||||
return await Config.find(filter).lean();
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
return { config: 'Error getting configs' };
|
||||
@@ -75,10 +75,10 @@ module.exports = {
|
||||
},
|
||||
deleteConfigs: async (filter) => {
|
||||
try {
|
||||
return await Config.deleteMany(filter).exec();
|
||||
return await Config.deleteMany(filter);
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
return { config: 'Error deleting configs' };
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
@@ -4,7 +4,7 @@ const { getMessages, deleteMessages } = require('./Message');
|
||||
|
||||
const getConvo = async (user, conversationId) => {
|
||||
try {
|
||||
return await Conversation.findOne({ user, conversationId }).exec();
|
||||
return await Conversation.findOne({ user, conversationId }).lean();
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return { message: 'Error getting single conversation' };
|
||||
@@ -23,8 +23,8 @@ module.exports = {
|
||||
|
||||
return await Conversation.findOneAndUpdate({ conversationId: conversationId, user }, update, {
|
||||
new: true,
|
||||
upsert: true
|
||||
}).exec();
|
||||
upsert: true,
|
||||
});
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return { message: 'Error saving conversation' };
|
||||
@@ -35,10 +35,10 @@ module.exports = {
|
||||
const totalConvos = (await Conversation.countDocuments({ user })) || 1;
|
||||
const totalPages = Math.ceil(totalConvos / pageSize);
|
||||
const convos = await Conversation.find({ user })
|
||||
.sort({ createdAt: -1, created: -1 })
|
||||
.sort({ createdAt: -1 })
|
||||
.skip((pageNumber - 1) * pageSize)
|
||||
.limit(pageSize)
|
||||
.exec();
|
||||
.lean();
|
||||
return { conversations: convos, pages: totalPages, pageNumber, pageSize };
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
@@ -54,35 +54,27 @@ module.exports = {
|
||||
const cache = {};
|
||||
const convoMap = {};
|
||||
const promises = [];
|
||||
// will handle a syncing solution soon
|
||||
const deletedConvoIds = [];
|
||||
|
||||
convoIds.forEach((convo) =>
|
||||
promises.push(
|
||||
Conversation.findOne({
|
||||
user,
|
||||
conversationId: convo.conversationId
|
||||
}).exec()
|
||||
)
|
||||
conversationId: convo.conversationId,
|
||||
}).lean(),
|
||||
),
|
||||
);
|
||||
|
||||
const results = (await Promise.all(promises)).filter((convo, i) => {
|
||||
if (!convo) {
|
||||
deletedConvoIds.push(convoIds[i].conversationId);
|
||||
return false;
|
||||
} else {
|
||||
const page = Math.floor(i / pageSize) + 1;
|
||||
if (!cache[page]) {
|
||||
cache[page] = [];
|
||||
}
|
||||
cache[page].push(convo);
|
||||
convoMap[convo.conversationId] = convo;
|
||||
return true;
|
||||
const results = (await Promise.all(promises)).filter(Boolean);
|
||||
|
||||
results.forEach((convo, i) => {
|
||||
const page = Math.floor(i / pageSize) + 1;
|
||||
if (!cache[page]) {
|
||||
cache[page] = [];
|
||||
}
|
||||
cache[page].push(convo);
|
||||
convoMap[convo.conversationId] = convo;
|
||||
});
|
||||
|
||||
// const startIndex = (pageNumber - 1) * pageSize;
|
||||
// const convos = results.slice(startIndex, startIndex + pageSize);
|
||||
const totalPages = Math.ceil(results.length / pageSize);
|
||||
cache.pages = totalPages;
|
||||
cache.pageSize = pageSize;
|
||||
@@ -92,9 +84,7 @@ module.exports = {
|
||||
pages: totalPages || 1,
|
||||
pageNumber,
|
||||
pageSize,
|
||||
// will handle a syncing solution soon
|
||||
filter: new Set(deletedConvoIds),
|
||||
convoMap
|
||||
convoMap,
|
||||
};
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
@@ -121,8 +111,8 @@ module.exports = {
|
||||
deleteConvos: async (user, filter) => {
|
||||
let toRemove = await Conversation.find({ ...filter, user }).select('conversationId');
|
||||
const ids = toRemove.map((instance) => instance.conversationId);
|
||||
let deleteCount = await Conversation.deleteMany({ ...filter, user }).exec();
|
||||
let deleteCount = await Conversation.deleteMany({ ...filter, user });
|
||||
deleteCount.messages = await deleteMessages({ conversationId: { $in: ids } });
|
||||
return deleteCount;
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
@@ -34,9 +34,9 @@ module.exports = {
|
||||
cancelled,
|
||||
tokenCount,
|
||||
plugin,
|
||||
model
|
||||
model,
|
||||
},
|
||||
{ upsert: true, new: true }
|
||||
{ upsert: true, new: true },
|
||||
);
|
||||
|
||||
return {
|
||||
@@ -56,11 +56,7 @@ module.exports = {
|
||||
async updateMessage(message) {
|
||||
try {
|
||||
const { messageId, ...update } = message;
|
||||
const updatedMessage = await Message.findOneAndUpdate(
|
||||
{ messageId },
|
||||
update,
|
||||
{ new: true }
|
||||
);
|
||||
const updatedMessage = await Message.findOneAndUpdate({ messageId }, update, { new: true });
|
||||
|
||||
if (!updatedMessage) {
|
||||
throw new Error('Message not found.');
|
||||
@@ -82,12 +78,12 @@ module.exports = {
|
||||
},
|
||||
async deleteMessagesSince({ messageId, conversationId }) {
|
||||
try {
|
||||
const message = await Message.findOne({ messageId }).exec();
|
||||
const message = await Message.findOne({ messageId }).lean();
|
||||
|
||||
if (message) {
|
||||
return await Message.find({ conversationId })
|
||||
.deleteMany({ createdAt: { $gt: message.createdAt } })
|
||||
.exec();
|
||||
return await Message.find({ conversationId }).deleteMany({
|
||||
createdAt: { $gt: message.createdAt },
|
||||
});
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(`Error deleting messages: ${err}`);
|
||||
@@ -97,7 +93,7 @@ module.exports = {
|
||||
|
||||
async getMessages(filter) {
|
||||
try {
|
||||
return await Message.find(filter).sort({ createdAt: 1 }).exec();
|
||||
return await Message.find(filter).sort({ createdAt: 1 }).lean();
|
||||
} catch (err) {
|
||||
console.error(`Error getting messages: ${err}`);
|
||||
throw new Error('Failed to get messages.');
|
||||
@@ -106,10 +102,10 @@ module.exports = {
|
||||
|
||||
async deleteMessages(filter) {
|
||||
try {
|
||||
return await Message.deleteMany(filter).exec();
|
||||
return await Message.deleteMany(filter);
|
||||
} catch (err) {
|
||||
console.error(`Error deleting messages: ${err}`);
|
||||
throw new Error('Failed to delete messages.');
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
@@ -2,7 +2,7 @@ const Preset = require('./schema/presetSchema');
|
||||
|
||||
const getPreset = async (user, presetId) => {
|
||||
try {
|
||||
return await Preset.findOne({ user, presetId }).exec();
|
||||
return await Preset.findOne({ user, presetId }).lean();
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return { message: 'Error getting single preset' };
|
||||
@@ -14,10 +14,10 @@ module.exports = {
|
||||
getPreset,
|
||||
getPresets: async (user, filter) => {
|
||||
try {
|
||||
return await Preset.find({ ...filter, user }).exec();
|
||||
return await Preset.find({ ...filter, user }).lean();
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return { message: 'Error retriving presets' };
|
||||
return { message: 'Error retrieving presets' };
|
||||
}
|
||||
},
|
||||
savePreset: async (user, { presetId, newPresetId, ...preset }) => {
|
||||
@@ -30,8 +30,8 @@ module.exports = {
|
||||
return await Preset.findOneAndUpdate(
|
||||
{ presetId, user },
|
||||
{ $set: update },
|
||||
{ new: true, upsert: true }
|
||||
).exec();
|
||||
{ new: true, upsert: true },
|
||||
);
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return { message: 'Error saving preset' };
|
||||
@@ -40,7 +40,7 @@ module.exports = {
|
||||
deletePresets: async (user, filter) => {
|
||||
// let toRemove = await Preset.find({ ...filter, user }).select('presetId');
|
||||
// const ids = toRemove.map((instance) => instance.presetId);
|
||||
let deleteCount = await Preset.deleteMany({ ...filter, user }).exec();
|
||||
let deleteCount = await Preset.deleteMany({ ...filter, user });
|
||||
return deleteCount;
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
@@ -4,17 +4,17 @@ const promptSchema = mongoose.Schema(
|
||||
{
|
||||
title: {
|
||||
type: String,
|
||||
required: true
|
||||
required: true,
|
||||
},
|
||||
prompt: {
|
||||
type: String,
|
||||
required: true
|
||||
required: true,
|
||||
},
|
||||
category: {
|
||||
type: String
|
||||
}
|
||||
type: String,
|
||||
},
|
||||
},
|
||||
{ timestamps: true }
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
const Prompt = mongoose.models.Prompt || mongoose.model('Prompt', promptSchema);
|
||||
@@ -24,7 +24,7 @@ module.exports = {
|
||||
try {
|
||||
await Prompt.create({
|
||||
title,
|
||||
prompt
|
||||
prompt,
|
||||
});
|
||||
return { title, prompt };
|
||||
} catch (error) {
|
||||
@@ -34,7 +34,7 @@ module.exports = {
|
||||
},
|
||||
getPrompts: async (filter) => {
|
||||
try {
|
||||
return await Prompt.find(filter).exec();
|
||||
return await Prompt.find(filter).lean();
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
return { prompt: 'Error getting prompts' };
|
||||
@@ -42,10 +42,10 @@ module.exports = {
|
||||
},
|
||||
deletePrompts: async (filter) => {
|
||||
try {
|
||||
return await Prompt.deleteMany(filter).exec();
|
||||
return await Prompt.deleteMany(filter);
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
return { prompt: 'Error deleting prompts' };
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
@@ -12,78 +12,83 @@ function log({ title, parameters }) {
|
||||
const Session = mongoose.Schema({
|
||||
refreshToken: {
|
||||
type: String,
|
||||
default: ''
|
||||
}
|
||||
default: '',
|
||||
},
|
||||
});
|
||||
|
||||
const userSchema = mongoose.Schema(
|
||||
{
|
||||
name: {
|
||||
type: String
|
||||
type: String,
|
||||
},
|
||||
username: {
|
||||
type: String,
|
||||
lowercase: true,
|
||||
required: [true, "can't be blank"],
|
||||
required: [true, 'can\'t be blank'],
|
||||
match: [/^[a-zA-Z0-9_-]+$/, 'is invalid'],
|
||||
index: true
|
||||
index: true,
|
||||
},
|
||||
email: {
|
||||
type: String,
|
||||
required: [true, "can't be blank"],
|
||||
required: [true, 'can\'t be blank'],
|
||||
lowercase: true,
|
||||
unique: true,
|
||||
match: [/\S+@\S+\.\S+/, 'is invalid'],
|
||||
index: true
|
||||
index: true,
|
||||
},
|
||||
emailVerified: {
|
||||
type: Boolean,
|
||||
required: true,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
password: {
|
||||
type: String,
|
||||
trim: true,
|
||||
minlength: 8,
|
||||
maxlength: 128
|
||||
maxlength: 128,
|
||||
},
|
||||
avatar: {
|
||||
type: String,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
provider: {
|
||||
type: String,
|
||||
required: true,
|
||||
default: 'local'
|
||||
default: 'local',
|
||||
},
|
||||
role: {
|
||||
type: String,
|
||||
default: 'USER'
|
||||
default: 'USER',
|
||||
},
|
||||
googleId: {
|
||||
type: String,
|
||||
unique: true,
|
||||
sparse: true
|
||||
sparse: true,
|
||||
},
|
||||
openidId: {
|
||||
type: String,
|
||||
unique: true,
|
||||
sparse: true
|
||||
sparse: true,
|
||||
},
|
||||
githubId: {
|
||||
type: String,
|
||||
unique: true,
|
||||
sparse: true
|
||||
sparse: true,
|
||||
},
|
||||
discordId: {
|
||||
type: String,
|
||||
unique: true,
|
||||
sparse: true,
|
||||
},
|
||||
plugins: {
|
||||
type: Array,
|
||||
default: []
|
||||
default: [],
|
||||
},
|
||||
refreshToken: {
|
||||
type: [Session]
|
||||
}
|
||||
type: [Session],
|
||||
},
|
||||
},
|
||||
{ timestamps: true }
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
//Remove refreshToken from the response
|
||||
@@ -91,7 +96,7 @@ userSchema.set('toJSON', {
|
||||
transform: function (_doc, ret) {
|
||||
delete ret.refreshToken;
|
||||
return ret;
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
userSchema.methods.toJSON = function () {
|
||||
@@ -106,7 +111,7 @@ userSchema.methods.toJSON = function () {
|
||||
emailVerified: this.emailVerified,
|
||||
plugins: this.plugins,
|
||||
createdAt: this.createdAt,
|
||||
updatedAt: this.updatedAt
|
||||
updatedAt: this.updatedAt,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -116,10 +121,10 @@ userSchema.methods.generateToken = function () {
|
||||
id: this._id,
|
||||
username: this.username,
|
||||
provider: this.provider,
|
||||
email: this.email
|
||||
email: this.email,
|
||||
},
|
||||
process.env.JWT_SECRET,
|
||||
{ expiresIn: eval(process.env.SESSION_EXPIRY) }
|
||||
{ expiresIn: eval(process.env.SESSION_EXPIRY) },
|
||||
);
|
||||
return token;
|
||||
};
|
||||
@@ -130,17 +135,19 @@ userSchema.methods.generateRefreshToken = function () {
|
||||
id: this._id,
|
||||
username: this.username,
|
||||
provider: this.provider,
|
||||
email: this.email
|
||||
email: this.email,
|
||||
},
|
||||
process.env.JWT_REFRESH_SECRET,
|
||||
{ expiresIn: eval(process.env.REFRESH_TOKEN_EXPIRY) }
|
||||
{ expiresIn: eval(process.env.REFRESH_TOKEN_EXPIRY) },
|
||||
);
|
||||
return refreshToken;
|
||||
};
|
||||
|
||||
userSchema.methods.comparePassword = function (candidatePassword, callback) {
|
||||
bcrypt.compare(candidatePassword, this.password, (err, isMatch) => {
|
||||
if (err) return callback(err);
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
callback(null, isMatch);
|
||||
});
|
||||
};
|
||||
@@ -148,8 +155,11 @@ userSchema.methods.comparePassword = function (candidatePassword, callback) {
|
||||
module.exports.hashPassword = async (password) => {
|
||||
const hashedPassword = await new Promise((resolve, reject) => {
|
||||
bcrypt.hash(password, 10, function (err, hash) {
|
||||
if (err) reject(err);
|
||||
else resolve(hash);
|
||||
if (err) {
|
||||
reject(err);
|
||||
} else {
|
||||
resolve(hash);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -159,7 +169,7 @@ module.exports.hashPassword = async (password) => {
|
||||
module.exports.validateUser = (user) => {
|
||||
log({
|
||||
title: 'Validate User',
|
||||
parameters: [{ name: 'Validate User', value: user }]
|
||||
parameters: [{ name: 'Validate User', value: user }],
|
||||
});
|
||||
const schema = {
|
||||
avatar: Joi.any(),
|
||||
@@ -169,7 +179,7 @@ module.exports.validateUser = (user) => {
|
||||
.max(80)
|
||||
.regex(/^[a-zA-Z0-9_-]+$/)
|
||||
.required(),
|
||||
password: Joi.string().min(8).max(128).allow('').allow(null)
|
||||
password: Joi.string().min(8).max(128).allow('').allow(null),
|
||||
};
|
||||
|
||||
return schema.validate(user);
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
const { getMessages, saveMessage, updateMessage, deleteMessagesSince, deleteMessages } = require('./Message');
|
||||
const {
|
||||
getMessages,
|
||||
saveMessage,
|
||||
updateMessage,
|
||||
deleteMessagesSince,
|
||||
deleteMessages,
|
||||
} = require('./Message');
|
||||
const { getConvoTitle, getConvo, saveConvo } = require('./Conversation');
|
||||
const { getPreset, getPresets, savePreset, deletePresets } = require('./Preset');
|
||||
|
||||
@@ -16,5 +22,5 @@ module.exports = {
|
||||
getPreset,
|
||||
getPresets,
|
||||
savePreset,
|
||||
deletePresets
|
||||
deletePresets,
|
||||
};
|
||||
|
||||
@@ -2,42 +2,130 @@ const mongoose = require('mongoose');
|
||||
const { MeiliSearch } = require('meilisearch');
|
||||
const { cleanUpPrimaryKeyValue } = require('../../lib/utils/misc');
|
||||
const _ = require('lodash');
|
||||
const searchEnabled = process.env.SEARCH && process.env.SEARCH.toLowerCase() === 'true';
|
||||
const meiliEnabled = process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY && searchEnabled;
|
||||
|
||||
const validateOptions = function (options) {
|
||||
const requiredKeys = ['host', 'apiKey', 'indexName'];
|
||||
requiredKeys.forEach((key) => {
|
||||
if (!options[key]) throw new Error(`Missing mongoMeili Option: ${key}`);
|
||||
if (!options[key]) {
|
||||
throw new Error(`Missing mongoMeili Option: ${key}`);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
const createMeiliMongooseModel = function ({ index, indexName, client, attributesToIndex }) {
|
||||
// console.log('attributesToIndex', attributesToIndex);
|
||||
// const createMeiliMongooseModel = function ({ index, indexName, client, attributesToIndex }) {
|
||||
const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
|
||||
const primaryKey = attributesToIndex[0];
|
||||
// MeiliMongooseModel is of type Mongoose.Model
|
||||
class MeiliMongooseModel {
|
||||
// Clear Meili index
|
||||
static async clearMeiliIndex() {
|
||||
await index.delete();
|
||||
// await index.deleteAllDocuments();
|
||||
await this.collection.updateMany({ _meiliIndex: true }, { $set: { _meiliIndex: false } });
|
||||
}
|
||||
|
||||
static async resetIndex() {
|
||||
await this.clearMeiliIndex();
|
||||
await client.createIndex(indexName, { primaryKey });
|
||||
}
|
||||
// Clear Meili index
|
||||
// Push a mongoDB collection to Meili index
|
||||
/**
|
||||
* `syncWithMeili`: synchronizes the data between a MongoDB collection and a MeiliSearch index,
|
||||
* only triggered if there's ever a discrepancy determined by `api\lib\db\indexSync.js`.
|
||||
*
|
||||
* 1. Fetches all documents from the MongoDB collection and the MeiliSearch index.
|
||||
* 2. Compares the documents from both sources.
|
||||
* 3. If a document exists in MeiliSearch but not in MongoDB, it's deleted from MeiliSearch.
|
||||
* 4. If a document exists in MongoDB but not in MeiliSearch, it's added to MeiliSearch.
|
||||
* 5. If a document exists in both but has different `text` or `title` fields (depending on the `primaryKey`), it's updated in MeiliSearch.
|
||||
* 6. After all operations, it updates the `_meiliIndex` field in MongoDB to indicate whether the document is indexed in MeiliSearch.
|
||||
*
|
||||
* Note: This strategy does not use batch operations for Meilisearch as the `index.addDocuments` will discard
|
||||
* the entire batch if there's an error with one document, and will not throw an error if there's an issue.
|
||||
* Also, `index.getDocuments` needs an exact limit on the amount of documents to return, so we build the map in batches.
|
||||
*
|
||||
* @returns {Promise} A promise that resolves when the synchronization is complete.
|
||||
*
|
||||
* @throws {Error} Throws an error if there's an issue with adding a document to MeiliSearch.
|
||||
*/
|
||||
static async syncWithMeili() {
|
||||
await this.resetIndex();
|
||||
// const docs = await this.find();
|
||||
const docs = await this.find({ _meiliIndex: { $in: [null, false] } });
|
||||
console.log('docs', docs.length);
|
||||
await Promise.all(
|
||||
docs.map(function (doc) {
|
||||
return doc.addObjectToMeili();
|
||||
})
|
||||
);
|
||||
try {
|
||||
let moreDocuments = true;
|
||||
const mongoDocuments = await this.find().lean();
|
||||
const format = (doc) => _.pick(doc, attributesToIndex);
|
||||
|
||||
// Prepare for comparison
|
||||
const mongoMap = new Map(mongoDocuments.map((doc) => [doc[primaryKey], format(doc)]));
|
||||
const indexMap = new Map();
|
||||
let offset = 0;
|
||||
const batchSize = 1000;
|
||||
|
||||
while (moreDocuments) {
|
||||
const batch = await index.getDocuments({ limit: batchSize, offset });
|
||||
|
||||
if (batch.results.length === 0) {
|
||||
moreDocuments = false;
|
||||
}
|
||||
|
||||
for (const doc of batch.results) {
|
||||
indexMap.set(doc[primaryKey], format(doc));
|
||||
}
|
||||
|
||||
offset += batchSize;
|
||||
}
|
||||
|
||||
console.log('indexMap', indexMap.size);
|
||||
console.log('mongoMap', mongoMap.size);
|
||||
|
||||
const updateOps = [];
|
||||
|
||||
// Iterate over Meili index documents
|
||||
for (const [id, doc] of indexMap) {
|
||||
const update = {};
|
||||
update[primaryKey] = id;
|
||||
if (mongoMap.has(id)) {
|
||||
// Case: Update
|
||||
// If document also exists in MongoDB, would be update case
|
||||
if (
|
||||
(doc.text && doc.text !== mongoMap.get(id).text) ||
|
||||
(doc.title && doc.title !== mongoMap.get(id).title)
|
||||
) {
|
||||
console.log(`${id} had document discrepancy in ${doc.text ? 'text' : 'title'} field`);
|
||||
updateOps.push({
|
||||
updateOne: { filter: update, update: { $set: { _meiliIndex: true } } },
|
||||
});
|
||||
await index.addDocuments([doc]);
|
||||
}
|
||||
} else {
|
||||
// Case: Delete
|
||||
// If document does not exist in MongoDB, its a delete case from meili index
|
||||
await index.deleteDocument(id);
|
||||
updateOps.push({
|
||||
updateOne: { filter: update, update: { $set: { _meiliIndex: false } } },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate over MongoDB documents
|
||||
for (const [id, doc] of mongoMap) {
|
||||
const update = {};
|
||||
update[primaryKey] = id;
|
||||
// Case: Insert
|
||||
// If document does not exist in Meili Index, Its an insert case
|
||||
if (!indexMap.has(id)) {
|
||||
await index.addDocuments([doc]);
|
||||
updateOps.push({
|
||||
updateOne: { filter: update, update: { $set: { _meiliIndex: true } } },
|
||||
});
|
||||
} else if (doc._meiliIndex === false) {
|
||||
updateOps.push({
|
||||
updateOne: { filter: update, update: { $set: { _meiliIndex: true } } },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (updateOps.length > 0) {
|
||||
await this.collection.bulkWrite(updateOps);
|
||||
console.log(
|
||||
`[Meilisearch] Finished indexing ${
|
||||
primaryKey === 'messageId' ? 'messages' : 'conversations'
|
||||
}`,
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
console.log('[Meilisearch] Error adding document to Meili');
|
||||
console.error(error);
|
||||
}
|
||||
}
|
||||
|
||||
// Set one or more settings of the meili index
|
||||
@@ -63,9 +151,9 @@ const createMeiliMongooseModel = function ({ index, indexName, client, attribute
|
||||
function (results, value, key) {
|
||||
return { ...results, [key]: 1 };
|
||||
},
|
||||
{ _id: 1 }
|
||||
)
|
||||
);
|
||||
{ _id: 1 },
|
||||
),
|
||||
).lean();
|
||||
|
||||
// Add additional data from mongodb into Meili search hits
|
||||
const populatedHits = data.hits.map(function (hit) {
|
||||
@@ -74,8 +162,8 @@ const createMeiliMongooseModel = function ({ index, indexName, client, attribute
|
||||
const originalHit = _.find(hitsFromMongoose, query);
|
||||
|
||||
return {
|
||||
...(originalHit ? originalHit.toJSON() : {}),
|
||||
...hit
|
||||
...(originalHit ?? {}),
|
||||
...hit,
|
||||
};
|
||||
});
|
||||
data.hits = populatedHits;
|
||||
@@ -84,15 +172,19 @@ const createMeiliMongooseModel = function ({ index, indexName, client, attribute
|
||||
return data;
|
||||
}
|
||||
|
||||
// Push new document to Meili
|
||||
async addObjectToMeili() {
|
||||
preprocessObjectForIndex() {
|
||||
const object = _.pick(this.toJSON(), attributesToIndex);
|
||||
// NOTE: MeiliSearch does not allow | in primary key, so we replace it with - for Bing convoIds
|
||||
// object.conversationId = object.conversationId.replace(/\|/g, '-');
|
||||
if (object.conversationId && object.conversationId.includes('|')) {
|
||||
object.conversationId = object.conversationId.replace(/\|/g, '--');
|
||||
}
|
||||
return object;
|
||||
}
|
||||
|
||||
// Push new document to Meili
|
||||
async addObjectToMeili() {
|
||||
const object = this.preprocessObjectForIndex();
|
||||
try {
|
||||
// console.log('Adding document to Meili', object);
|
||||
await index.addDocuments([object]);
|
||||
@@ -152,8 +244,8 @@ module.exports = function mongoMeili(schema, options) {
|
||||
type: Boolean,
|
||||
required: false,
|
||||
select: false,
|
||||
default: false
|
||||
}
|
||||
default: false,
|
||||
},
|
||||
});
|
||||
|
||||
const { host, apiKey, indexName, primaryKey } = options;
|
||||
@@ -174,8 +266,8 @@ module.exports = function mongoMeili(schema, options) {
|
||||
return value.meiliIndex ? [...results, key] : results;
|
||||
// }, []), '_id'];
|
||||
},
|
||||
[]
|
||||
)
|
||||
[],
|
||||
),
|
||||
];
|
||||
|
||||
schema.loadClass(createMeiliMongooseModel({ index, indexName, client, attributesToIndex }));
|
||||
@@ -190,19 +282,67 @@ module.exports = function mongoMeili(schema, options) {
|
||||
schema.post('remove', function (doc) {
|
||||
doc.postRemoveHook();
|
||||
});
|
||||
schema.post('deleteMany', function () {
|
||||
// console.log('deleteMany hook', doc);
|
||||
if (Object.prototype.hasOwnProperty.call(schema.obj, 'messages')) {
|
||||
console.log('Syncing convos...');
|
||||
mongoose.model('Conversation').syncWithMeili();
|
||||
|
||||
schema.pre('deleteMany', async function (next) {
|
||||
if (!meiliEnabled) {
|
||||
next();
|
||||
}
|
||||
|
||||
if (Object.prototype.hasOwnProperty.call(schema.obj, 'messageId')) {
|
||||
console.log('Syncing messages...');
|
||||
mongoose.model('Message').syncWithMeili();
|
||||
try {
|
||||
if (Object.prototype.hasOwnProperty.call(schema.obj, 'messages')) {
|
||||
const convoIndex = client.index('convos');
|
||||
const deletedConvos = await mongoose.model('Conversation').find(this._conditions).lean();
|
||||
let promises = [];
|
||||
for (const convo of deletedConvos) {
|
||||
promises.push(convoIndex.deleteDocument(convo.conversationId));
|
||||
}
|
||||
await Promise.all(promises);
|
||||
}
|
||||
|
||||
if (Object.prototype.hasOwnProperty.call(schema.obj, 'messageId')) {
|
||||
const messageIndex = client.index('messages');
|
||||
const deletedMessages = await mongoose.model('Message').find(this._conditions).lean();
|
||||
let promises = [];
|
||||
for (const message of deletedMessages) {
|
||||
promises.push(messageIndex.deleteDocument(message.messageId));
|
||||
}
|
||||
await Promise.all(promises);
|
||||
}
|
||||
return next();
|
||||
} catch (error) {
|
||||
if (meiliEnabled) {
|
||||
console.log(
|
||||
'[Meilisearch] There was an issue deleting conversation indexes upon deletion, next startup may be slow due to syncing',
|
||||
);
|
||||
console.error(error);
|
||||
}
|
||||
return next();
|
||||
}
|
||||
});
|
||||
schema.post('findOneAndUpdate', function (doc) {
|
||||
|
||||
schema.post('findOneAndUpdate', async function (doc) {
|
||||
if (!meiliEnabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (doc.unfinished) {
|
||||
return;
|
||||
}
|
||||
|
||||
let meiliDoc;
|
||||
// Doc is a Conversation
|
||||
if (doc.messages) {
|
||||
try {
|
||||
meiliDoc = await client.index('convos').getDocument(doc.conversationId);
|
||||
} catch (error) {
|
||||
console.log('[Meilisearch] Convo not found and will index', doc.conversationId);
|
||||
}
|
||||
}
|
||||
|
||||
if (meiliDoc && meiliDoc.title === doc.title) {
|
||||
return;
|
||||
}
|
||||
|
||||
doc.postSaveHook();
|
||||
});
|
||||
};
|
||||
|
||||
@@ -8,48 +8,48 @@ const convoSchema = mongoose.Schema(
|
||||
unique: true,
|
||||
required: true,
|
||||
index: true,
|
||||
meiliIndex: true
|
||||
meiliIndex: true,
|
||||
},
|
||||
title: {
|
||||
type: String,
|
||||
default: 'New Chat',
|
||||
meiliIndex: true
|
||||
meiliIndex: true,
|
||||
},
|
||||
user: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
|
||||
// google only
|
||||
examples: [{ type: mongoose.Schema.Types.Mixed }],
|
||||
agentOptions: {
|
||||
type: mongoose.Schema.Types.Mixed,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
...conversationPreset,
|
||||
// for bingAI only
|
||||
bingConversationId: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
jailbreakConversationId: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
conversationSignature: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
clientId: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
invocationId: {
|
||||
type: Number,
|
||||
default: 1
|
||||
}
|
||||
default: 1,
|
||||
},
|
||||
},
|
||||
{ timestamps: true }
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
|
||||
@@ -57,10 +57,12 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
|
||||
host: process.env.MEILI_HOST,
|
||||
apiKey: process.env.MEILI_MASTER_KEY,
|
||||
indexName: 'convos', // Will get created automatically if it doesn't exist already
|
||||
primaryKey: 'conversationId'
|
||||
primaryKey: 'conversationId',
|
||||
});
|
||||
}
|
||||
|
||||
convoSchema.index({ createdAt: 1 });
|
||||
|
||||
const Conversation = mongoose.models.Conversation || mongoose.model('Conversation', convoSchema);
|
||||
|
||||
module.exports = Conversation;
|
||||
|
||||
@@ -1,158 +1,158 @@
|
||||
const conversationPreset = {
|
||||
// endpoint: [azureOpenAI, openAI, bingAI, chatGPTBrowser]
|
||||
// endpoint: [azureOpenAI, openAI, bingAI, anthropic, chatGPTBrowser]
|
||||
endpoint: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: true
|
||||
required: true,
|
||||
},
|
||||
// for azureOpenAI, openAI, chatGPTBrowser only
|
||||
model: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
// for azureOpenAI, openAI only
|
||||
chatGptLabel: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
// for google only
|
||||
modelLabel: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
promptPrefix: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
temperature: {
|
||||
type: Number,
|
||||
default: 1,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
top_p: {
|
||||
type: Number,
|
||||
default: 1,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
// for google only
|
||||
topP: {
|
||||
type: Number,
|
||||
default: 0.95,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
topK: {
|
||||
type: Number,
|
||||
default: 40,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
maxOutputTokens: {
|
||||
type: Number,
|
||||
default: 1024,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
presence_penalty: {
|
||||
type: Number,
|
||||
default: 0,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
frequency_penalty: {
|
||||
type: Number,
|
||||
default: 0,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
// for bingai only
|
||||
jailbreak: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
context: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
systemMessage: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
toneStyle: {
|
||||
type: String,
|
||||
default: null
|
||||
}
|
||||
default: null,
|
||||
},
|
||||
};
|
||||
|
||||
const agentOptions = {
|
||||
model: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
// for azureOpenAI, openAI only
|
||||
chatGptLabel: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
// for google only
|
||||
modelLabel: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
promptPrefix: {
|
||||
type: String,
|
||||
default: null,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
temperature: {
|
||||
type: Number,
|
||||
default: 1,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
top_p: {
|
||||
type: Number,
|
||||
default: 1,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
// for google only
|
||||
topP: {
|
||||
type: Number,
|
||||
default: 0.95,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
topK: {
|
||||
type: Number,
|
||||
default: 40,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
maxOutputTokens: {
|
||||
type: Number,
|
||||
default: 1024,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
presence_penalty: {
|
||||
type: Number,
|
||||
default: 0,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
frequency_penalty: {
|
||||
type: Number,
|
||||
default: 0,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
context: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
systemMessage: {
|
||||
type: String,
|
||||
default: null
|
||||
}
|
||||
default: null,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
module.exports = {
|
||||
conversationPreset,
|
||||
agentOptions
|
||||
};
|
||||
agentOptions,
|
||||
};
|
||||
|
||||
@@ -7,88 +7,88 @@ const messageSchema = mongoose.Schema(
|
||||
unique: true,
|
||||
required: true,
|
||||
index: true,
|
||||
meiliIndex: true
|
||||
meiliIndex: true,
|
||||
},
|
||||
conversationId: {
|
||||
type: String,
|
||||
required: true,
|
||||
meiliIndex: true
|
||||
meiliIndex: true,
|
||||
},
|
||||
model: {
|
||||
type: String
|
||||
type: String,
|
||||
},
|
||||
conversationSignature: {
|
||||
type: String
|
||||
type: String,
|
||||
// required: true
|
||||
},
|
||||
clientId: {
|
||||
type: String
|
||||
type: String,
|
||||
},
|
||||
invocationId: {
|
||||
type: String
|
||||
type: String,
|
||||
},
|
||||
parentMessageId: {
|
||||
type: String
|
||||
type: String,
|
||||
// required: true
|
||||
},
|
||||
tokenCount: {
|
||||
type: Number
|
||||
type: Number,
|
||||
},
|
||||
refinedTokenCount: {
|
||||
type: Number
|
||||
type: Number,
|
||||
},
|
||||
sender: {
|
||||
type: String,
|
||||
required: true,
|
||||
meiliIndex: true
|
||||
meiliIndex: true,
|
||||
},
|
||||
text: {
|
||||
type: String,
|
||||
required: true,
|
||||
meiliIndex: true
|
||||
meiliIndex: true,
|
||||
},
|
||||
refinedMessageText: {
|
||||
type: String
|
||||
type: String,
|
||||
},
|
||||
isCreatedByUser: {
|
||||
type: Boolean,
|
||||
required: true,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
unfinished: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
cancelled: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
error: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
_meiliIndex: {
|
||||
type: Boolean,
|
||||
required: false,
|
||||
select: false,
|
||||
default: false
|
||||
default: false,
|
||||
},
|
||||
plugin: {
|
||||
latest: {
|
||||
type: String,
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
inputs: {
|
||||
type: [mongoose.Schema.Types.Mixed],
|
||||
required: false
|
||||
required: false,
|
||||
},
|
||||
outputs: {
|
||||
type: String,
|
||||
required: false
|
||||
}
|
||||
}
|
||||
required: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{ timestamps: true }
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
|
||||
@@ -96,10 +96,12 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
|
||||
host: process.env.MEILI_HOST,
|
||||
apiKey: process.env.MEILI_MASTER_KEY,
|
||||
indexName: 'messages',
|
||||
primaryKey: 'messageId'
|
||||
primaryKey: 'messageId',
|
||||
});
|
||||
}
|
||||
|
||||
messageSchema.index({ createdAt: 1 });
|
||||
|
||||
const Message = mongoose.models.Message || mongoose.model('Message', messageSchema);
|
||||
|
||||
module.exports = Message;
|
||||
|
||||
@@ -8,19 +8,19 @@ const pluginAuthSchema = mongoose.Schema(
|
||||
},
|
||||
value: {
|
||||
type: String,
|
||||
required: true
|
||||
required: true,
|
||||
},
|
||||
userId: {
|
||||
type: String,
|
||||
required: true
|
||||
required: true,
|
||||
},
|
||||
pluginKey: {
|
||||
type: String,
|
||||
}
|
||||
},
|
||||
},
|
||||
{ timestamps: true }
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
const PluginAuth = mongoose.models.Plugin || mongoose.model('PluginAuth', pluginAuthSchema);
|
||||
|
||||
module.exports = PluginAuth;
|
||||
module.exports = PluginAuth;
|
||||
|
||||
@@ -6,26 +6,26 @@ const presetSchema = mongoose.Schema(
|
||||
type: String,
|
||||
unique: true,
|
||||
required: true,
|
||||
index: true
|
||||
index: true,
|
||||
},
|
||||
title: {
|
||||
type: String,
|
||||
default: 'New Chat',
|
||||
meiliIndex: true
|
||||
meiliIndex: true,
|
||||
},
|
||||
user: {
|
||||
type: String,
|
||||
default: null
|
||||
default: null,
|
||||
},
|
||||
// google only
|
||||
examples: [{ type: mongoose.Schema.Types.Mixed }],
|
||||
...conversationPreset,
|
||||
agentOptions: {
|
||||
type: mongoose.Schema.Types.Mixed,
|
||||
default: null
|
||||
}
|
||||
default: null,
|
||||
},
|
||||
},
|
||||
{ timestamps: true }
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
const Preset = mongoose.models.Preset || mongoose.model('Preset', presetSchema);
|
||||
|
||||
@@ -5,18 +5,18 @@ const tokenSchema = new Schema({
|
||||
userId: {
|
||||
type: Schema.Types.ObjectId,
|
||||
required: true,
|
||||
ref: 'user'
|
||||
ref: 'user',
|
||||
},
|
||||
token: {
|
||||
type: String,
|
||||
required: true
|
||||
required: true,
|
||||
},
|
||||
createdAt: {
|
||||
type: Date,
|
||||
required: true,
|
||||
default: Date.now,
|
||||
expires: 900
|
||||
}
|
||||
expires: 900,
|
||||
},
|
||||
});
|
||||
|
||||
module.exports = mongoose.model('Token', tokenSchema);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@librechat/backend",
|
||||
"version": "0.5.3",
|
||||
"version": "0.5.6",
|
||||
"description": "",
|
||||
"scripts": {
|
||||
"start": "echo 'please run this from the root directory'",
|
||||
@@ -20,9 +20,11 @@
|
||||
},
|
||||
"homepage": "https://github.com/danny-avila/LibreChat#readme",
|
||||
"dependencies": {
|
||||
"@anthropic-ai/sdk": "^0.5.4",
|
||||
"@dqbd/tiktoken": "^1.0.2",
|
||||
"@fortaine/fetch-event-source": "^3.0.6",
|
||||
"@keyv/mongo": "^2.1.8",
|
||||
"@waylaidwanderer/chatgpt-api": "^1.37.0",
|
||||
"@waylaidwanderer/chatgpt-api": "^1.37.2",
|
||||
"axios": "^1.3.4",
|
||||
"bcryptjs": "^2.4.3",
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
@@ -41,7 +43,7 @@
|
||||
"jsonwebtoken": "^9.0.0",
|
||||
"keyv": "^4.5.2",
|
||||
"keyv-file": "^0.2.0",
|
||||
"langchain": "^0.0.103",
|
||||
"langchain": "^0.0.114",
|
||||
"lodash": "^4.17.21",
|
||||
"meilisearch": "^0.33.0",
|
||||
"mongoose": "^7.1.1",
|
||||
@@ -49,7 +51,9 @@
|
||||
"openai": "^3.2.1",
|
||||
"openid-client": "^5.4.2",
|
||||
"passport": "^0.6.0",
|
||||
"passport-discord": "^0.1.4",
|
||||
"passport-facebook": "^3.0.0",
|
||||
"passport-github2": "^0.1.12",
|
||||
"passport-google-oauth20": "^2.0.0",
|
||||
"passport-jwt": "^4.0.1",
|
||||
"passport-local": "^1.0.0",
|
||||
|
||||
@@ -1,8 +1,4 @@
|
||||
const {
|
||||
registerUser,
|
||||
requestPasswordReset,
|
||||
resetPassword
|
||||
} = require('../services/auth.service');
|
||||
const { registerUser, requestPasswordReset, resetPassword } = require('../services/auth.service');
|
||||
|
||||
const isProduction = process.env.NODE_ENV === 'production';
|
||||
|
||||
@@ -16,7 +12,7 @@ const registrationController = async (req, res) => {
|
||||
res.cookie('token', token, {
|
||||
expires: new Date(Date.now() + eval(process.env.SESSION_EXPIRY)),
|
||||
httpOnly: false,
|
||||
secure: isProduction
|
||||
secure: isProduction,
|
||||
});
|
||||
res.status(status).send({ user });
|
||||
} else {
|
||||
@@ -52,7 +48,7 @@ const resetPasswordController = async (req, res) => {
|
||||
const resetPasswordService = await resetPassword(
|
||||
req.body.userId,
|
||||
req.body.token,
|
||||
req.body.password
|
||||
req.body.password,
|
||||
);
|
||||
if (resetPasswordService instanceof Error) {
|
||||
return res.status(400).json(resetPasswordService);
|
||||
@@ -120,5 +116,5 @@ module.exports = {
|
||||
// refreshController,
|
||||
registrationController,
|
||||
resetPasswordRequestController,
|
||||
resetPasswordController
|
||||
resetPasswordController,
|
||||
};
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user