Add agent hosts file, update docker-compose configurations, and introduce healing script for Portainer agents

This commit is contained in:
2025-12-22 23:43:20 +00:00
parent 055efb1227
commit c8fde79222
10 changed files with 1073 additions and 29 deletions

2
agent_hosts.txt Normal file
View File

@@ -0,0 +1,2 @@
192.168.50.151
192.168.50.210

View File

@@ -1,13 +1,5 @@
{
"mcpServers": {
"context7": {
"type": "sse",
"url": "http://192.168.50.185:8098/servers/context7/sse"
},
"firecrawl": {
"type": "sse",
"url": "http://192.168.50.185:8097/sse"
},
"pieces-os": {
"type": "sse",
"url": "http://192.168.50.185:8096/servers/pieces-os/sse"

View File

@@ -3,7 +3,7 @@
"use_api_key": false,
"config_file_path": "/data/mcp_generated_config.json",
"log_file_path": "/data/mcpo_manager.log",
"public_base_url": "http://192.168.50.151:58008",
"public_base_url": "http://mcp.toy",
"log_auto_refresh_enabled": true,
"log_auto_refresh_interval_seconds": 5,
"health_check_enabled": false,

View File

@@ -31,7 +31,7 @@ x-common-env: &common-env
# PROXY_PASSWORD: Squirtle123+
PROXY_SERVER: isp.oxylabs.io:8001
PROXY_USERNAME: customer-vasceannie_MPKJX
PROXY_PASSWORD: Squirtle123+
PROXY_PASSWORD: "Squirtle123+"
SEARXNG_ENDPOINT: http://searxng:8080
SEARXNG_ENGINES: ${SEARXNG_ENGINES}
SEARXNG_CATEGORIES: ${SEARXNG_CATEGORIES}

View File

@@ -0,0 +1,543 @@
version: "3.8"
services:
studio:
image: supabase/studio:2025.06.30-sha-6f5982d
restart: unless-stopped
healthcheck:
test:
- CMD
- node
- -e
- fetch('http://127.0.0.1:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})
timeout: 10s
interval: 5s
retries: 3
depends_on:
- analytics
environment:
- STUDIO_PG_META_URL=http://meta:8080
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-squirtle123456}
- DEFAULT_ORGANIZATION_NAME=${STUDIO_DEFAULT_ORGANIZATION:-Default Organization}
- DEFAULT_PROJECT_NAME=${STUDIO_DEFAULT_PROJECT:-Default Project}
- OPENAI_API_KEY=${OPENAI_API_KEY:-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}
- SUPABASE_URL=http://kong:8000
- SUPABASE_PUBLIC_URL=https://supa.baked.rocks
- SUPABASE_ANON_KEY=${ANON_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJhbm9uIn0.eO2Vi38564_lLqhEXR3s7nLbHL34014jYe05y9lpr1U}
- SUPABASE_SERVICE_KEY=${SERVICE_ROLE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJzZXJ2aWNlX3JvbGUifQ.l11N14S64uypRkNCbbWm4kV_DuqEIrsEySeBB4YNH1k}
- AUTH_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- LOGFLARE_PRIVATE_ACCESS_TOKEN=${LOGFLARE_PRIVATE_ACCESS_TOKEN:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- LOGFLARE_URL=http://analytics:4000
- NEXT_PUBLIC_ENABLE_LOGS=true
- NEXT_ANALYTICS_BACKEND_PROVIDER=postgres
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
labels:
- traefik.http.services.supa-kong.loadbalancer.healthcheck.path=/
- traefik.http.services.supa-kong.loadbalancer.healthcheck.interval=30s
- traefik.http.services.supa-kong.loadbalancer.healthcheck.headers.Authorization=Basic dmFzY2Vhbm5pZTpzcXVpcnRsZTEyMw==
kong:
image: kong:2.8.1
restart: unless-stopped
expose:
- ${KONG_HTTP_PORT:-8000}
- ${KONG_HTTPS_PORT:-8443}
healthcheck:
test:
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- http://127.0.0.1:8001/status
timeout: 5s
interval: 5s
retries: 3
volumes:
- /home/trav/supabase/docker/volumes/api/kong.yml:/home/kong/temp.yml:ro,z
depends_on:
- analytics
environment:
- KONG_DATABASE=off
- KONG_DECLARATIVE_CONFIG=/home/kong/kong.yml
- KONG_DNS_ORDER=LAST,A,CNAME
- KONG_PLUGINS=request-transformer,cors,key-auth,acl,basic-auth
- KONG_NGINX_PROXY_PROXY_BUFFER_SIZE=160k
- KONG_NGINX_PROXY_PROXY_BUFFERS=64 160k
- SUPABASE_ANON_KEY=${ANON_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJhbm9uIn0.eO2Vi38564_lLqhEXR3s7nLbHL34014jYe05y9lpr1U}
- SUPABASE_SERVICE_KEY=${SERVICE_ROLE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJzZXJ2aWNlX3JvbGUifQ.l11N14S64uypRkNCbbWm4kV_DuqEIrsEySeBB4YNH1k}
- DASHBOARD_USERNAME=${SERVICE_USER_KONG:-supabase}
- DASHBOARD_PASSWORD=${SERVICE_PASSWORD_KONG:-supabase123}
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
auth:
image: supabase/gotrue:v2.177.0
restart: unless-stopped
healthcheck:
test:
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- http://localhost:9999/health
timeout: 5s
interval: 5s
retries: 3
depends_on:
- db
- analytics
environment:
- GOTRUE_API_HOST=0.0.0.0
- GOTRUE_API_PORT=9999
- API_EXTERNAL_URL=https://supa.baked.rocks
- GOTRUE_DB_DRIVER=postgres
- GOTRUE_DB_DATABASE_URL=postgres://supabase_auth_admin:${POSTGRES_PASSWORD:-squirtle123456}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
- GOTRUE_SITE_URL=${SERVICE_URL_KONG}
- GOTRUE_URI_ALLOW_LIST=${ADDITIONAL_REDIRECT_URLS:-}
- GOTRUE_DISABLE_SIGNUP=${DISABLE_SIGNUP:-false}
- GOTRUE_JWT_ADMIN_ROLES=service_role
- GOTRUE_JWT_AUD=authenticated
- GOTRUE_JWT_DEFAULT_GROUP_NAME=authenticated
- GOTRUE_JWT_EXP=${JWT_EXPIRY:-3600}
- GOTRUE_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- GOTRUE_EXTERNAL_EMAIL_ENABLED=${ENABLE_EMAIL_SIGNUP:-true}
- GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED=${ENABLE_ANONYMOUS_USERS:-false}
- GOTRUE_MAILER_AUTOCONFIRM=${ENABLE_EMAIL_AUTOCONFIRM:-false}
- GOTRUE_SMTP_ADMIN_EMAIL=${SMTP_ADMIN_EMAIL:-admin@example.com}
- GOTRUE_SMTP_HOST=${SMTP_HOST:-supabase-mail}
- GOTRUE_SMTP_PORT=${SMTP_PORT:-2500}
- GOTRUE_SMTP_USER=${SMTP_USER:-fake_mail_user}
- GOTRUE_SMTP_PASS=${SMTP_PASS:-fake_mail_password}
- GOTRUE_SMTP_SENDER_NAME=${SMTP_SENDER_NAME:-fake_sender}
- GOTRUE_MAILER_URLPATHS_INVITE=${MAILER_URLPATHS_INVITE:-"/auth/v1/verify"}
- GOTRUE_MAILER_URLPATHS_CONFIRMATION=${MAILER_URLPATHS_CONFIRMATION:-"/auth/v1/verify"}
- GOTRUE_MAILER_URLPATHS_RECOVERY=${MAILER_URLPATHS_RECOVERY:-"/auth/v1/verify"}
- GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE=${MAILER_URLPATHS_EMAIL_CHANGE:-"/auth/v1/verify"}
- GOTRUE_EXTERNAL_PHONE_ENABLED=${ENABLE_PHONE_SIGNUP:-true}
- GOTRUE_SMS_AUTOCONFIRM=${ENABLE_PHONE_AUTOCONFIRM:-true}
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
rest:
image: postgrest/postgrest:v12.2.12
restart: unless-stopped
depends_on:
- db
- analytics
environment:
- PGRST_DB_URI=postgres://authenticator:${POSTGRES_PASSWORD:-squirtle123456}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
- PGRST_DB_SCHEMAS=${PGRST_DB_SCHEMAS:-public,storage,graphql_public}
- PGRST_DB_ANON_ROLE=anon
- PGRST_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- PGRST_DB_USE_LEGACY_GUCS=false
- PGRST_APP_SETTINGS_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- PGRST_APP_SETTINGS_JWT_EXP=${JWT_EXPIRY:-3600}
command:
- postgrest
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
realtime:
image: supabase/realtime:v2.34.47
restart: unless-stopped
depends_on:
- db
- analytics
healthcheck:
test:
- CMD
- curl
- -sSfL
- --head
- -o
- /dev/null
- -H
- "Authorization: Bearer ${ANON_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJhbm9uIn0.eO2Vi38564_lLqhEXR3s7nLbHL34014jYe05y9lpr1U}"
- http://localhost:4000/api/tenants/realtime-dev/health
timeout: 5s
interval: 5s
retries: 3
environment:
- PORT=4000
- DB_HOST=${POSTGRES_HOST:-db}
- DB_PORT=${POSTGRES_PORT:-5432}
- DB_USER=supabase_admin
- DB_PASSWORD=${POSTGRES_PASSWORD:-squirtle123456}
- DB_NAME=${POSTGRES_DB:-postgres}
- DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime
- DB_ENC_KEY=supabaserealtime
- API_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- SECRET_KEY_BASE=${SECRET_KEY_BASE:-UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq}
- ERL_AFLAGS=-proto_dist inet_tcp
- DNS_NODES=
- RLIMIT_NOFILE=10000
- APP_NAME=realtime
- SEED_SELF_HOST=true
- RUN_JANITOR=true
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
storage:
image: supabase/storage-api:v1.25.7
restart: unless-stopped
volumes:
- /home/trav/supabase/docker/volumes/storage:/var/lib/storage:z
healthcheck:
test:
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- http://127.0.0.1:5000/status
timeout: 5s
interval: 5s
retries: 3
depends_on:
- db
- rest
- imgproxy
environment:
- ANON_KEY=${ANON_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJhbm9uIn0.eO2Vi38564_lLqhEXR3s7nLbHL34014jYe05y9lpr1U}
- SERVICE_KEY=${SERVICE_ROLE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJzZXJ2aWNlX3JvbGUifQ.l11N14S64uypRkNCbbWm4kV_DuqEIrsEySeBB4YNH1k}
- POSTGREST_URL=http://rest:3000
- PGRST_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- DATABASE_URL=postgres://supabase_storage_admin:${POSTGRES_PASSWORD:-squirtle123456}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
- FILE_SIZE_LIMIT=52428800
- STORAGE_BACKEND=file
- FILE_STORAGE_BACKEND_PATH=/var/lib/storage
- TENANT_ID=stub
- REGION=stub
- GLOBAL_S3_BUCKET=stub
- ENABLE_IMAGE_TRANSFORMATION=true
- IMGPROXY_URL=http://imgproxy:5001
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
imgproxy:
image: darthsim/imgproxy:v3.8.0
restart: unless-stopped
volumes:
- /home/trav/supabase/docker/volumes/storage:/var/lib/storage:z
healthcheck:
test:
- CMD
- imgproxy
- health
timeout: 5s
interval: 5s
retries: 3
environment:
- IMGPROXY_BIND=:5001
- IMGPROXY_LOCAL_FILESYSTEM_ROOT=/
- IMGPROXY_USE_ETAG=true
- IMGPROXY_ENABLE_WEBP_DETECTION=${IMGPROXY_ENABLE_WEBP_DETECTION:-true}
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
meta:
image: supabase/postgres-meta:v0.91.0
restart: unless-stopped
depends_on:
- db
- analytics
environment:
- PG_META_PORT=8080
- PG_META_DB_HOST=${POSTGRES_HOST:-db}
- PG_META_DB_PORT=${POSTGRES_PORT:-5432}
- PG_META_DB_NAME=${POSTGRES_DB:-postgres}
- PG_META_DB_USER=supabase_admin
- PG_META_DB_PASSWORD=${POSTGRES_PASSWORD:-squirtle123456}
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
functions:
image: supabase/edge-runtime:v1.69.6
restart: unless-stopped
volumes:
- /home/trav/supabase/docker/volumes/functions:/home/deno/functions:Z
depends_on:
- analytics
environment:
- JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- SUPABASE_URL=http://kong:8000
- SUPABASE_ANON_KEY=${ANON_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJhbm9uIn0.eO2Vi38564_lLqhEXR3s7nLbHL34014jYe05y9lpr1U}
- SUPABASE_SERVICE_ROLE_KEY=${SERVICE_ROLE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDAsInJvbGUiOiJzZXJ2aWNlX3JvbGUifQ.l11N14S64uypRkNCbbWm4kV_DuqEIrsEySeBB4YNH1k}
- SUPABASE_DB_URL=postgresql://postgres:${POSTGRES_PASSWORD:-squirtle123456}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
- VERIFY_JWT=${FUNCTIONS_VERIFY_JWT:-false}
command:
- start
- --main-service
- /home/deno/functions/main
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
analytics:
image: supabase/logflare:1.14.2
restart: unless-stopped
expose:
- 4000
healthcheck:
test:
- CMD
- curl
- http://localhost:4000/health
timeout: 5s
interval: 5s
retries: 10
depends_on:
- db
environment:
- LOGFLARE_NODE_HOST=127.0.0.1
- DB_USERNAME=supabase_admin
- DB_DATABASE=_supabase
- DB_HOSTNAME=db
- DB_PORT=${POSTGRES_PORT:-5432}
- DB_PASSWORD=${POSTGRES_PASSWORD:-squirtle123456}
- DB_SCHEMA=_analytics
- LOGFLARE_PUBLIC_ACCESS_TOKEN=${LOGFLARE_PUBLIC_ACCESS_TOKEN:-8NfbHaslrNnz9cLFa9ItO03y9Q0btpB1}
- LOGFLARE_PRIVATE_ACCESS_TOKEN=${LOGFLARE_PRIVATE_ACCESS_TOKEN:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- LOGFLARE_SINGLE_TENANT=true
- LOGFLARE_SUPABASE_MODE=true
- LOGFLARE_MIN_CLUSTER_SIZE=1
- POSTGRES_BACKEND_URL=postgresql://supabase_admin:${POSTGRES_PASSWORD:-squirtle123456}@db:5432/_supabase
- POSTGRES_BACKEND_SCHEMA=_analytics
- LOGFLARE_FEATURE_FLAG_OVERRIDE=multibackend=true
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
db:
  image: supabase/postgres:15.8.1.060
  # NOTE(review): `restart:` is ignored by `docker stack deploy`; the
  # deploy.restart_policy below is what swarm actually honors.
  restart: unless-stopped
  ports:
    # Quoted: an unquoted digits:digits scalar is a YAML 1.1 sexagesimal
    # integer on some parsers — Compose docs recommend always quoting ports.
    - "54323:5432"
  volumes:
    # Init scripts / migrations run in lexical order on first boot only.
    - /home/trav/supabase/docker/volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
    - /home/trav/supabase/docker/volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
    - /home/trav/supabase/docker/volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
    - /home/trav/supabase/docker/volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
    - /home/trav/supabase/docker/volumes/db/data:/var/lib/postgresql/data:Z
    - /home/trav/supabase/docker/volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
    - /home/trav/supabase/docker/volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
    - /home/trav/supabase/docker/volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
    # Named volume (declared at file level) for custom postgres config.
    - db-config:/etc/postgresql-custom
  healthcheck:
    test:
      - CMD
      - pg_isready
      - -U
      - postgres
      - -h
      - localhost
    interval: 5s
    timeout: 5s
    retries: 10
  depends_on:
    # vector tails this container's logs; start order only (no health gating
    # in swarm mode).
    - vector
  environment:
    - POSTGRES_HOST=/var/run/postgresql
    - PGPORT=${POSTGRES_PORT:-5432}
    - POSTGRES_PORT=${POSTGRES_PORT:-5432}
    - PGPASSWORD=${POSTGRES_PASSWORD:-squirtle123456}
    - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-squirtle123456}
    - PGDATABASE=${POSTGRES_DB:-postgres}
    - POSTGRES_DB=${POSTGRES_DB:-postgres}
    - JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
    - JWT_EXP=${JWT_EXPIRY:-3600}
  command:
    - postgres
    - -c
    - config_file=/etc/postgresql/postgresql.conf
    - -c
    - log_min_messages=fatal
  networks:
    - supabase
  deploy:
    replicas: 1
    placement:
      constraints:
        - node.hostname == little
    restart_policy:
      condition: any
      delay: 5s
      max_attempts: 3
vector:
image: timberio/vector:0.28.1-alpine
restart: unless-stopped
volumes:
- /home/trav/supabase/docker/volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
- ${DOCKER_SOCKET_LOCATION:-/var/run/docker.sock}:/var/run/docker.sock:ro,z
healthcheck:
test:
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- http://127.0.0.1:9001/health
timeout: 5s
interval: 5s
retries: 3
environment:
- LOGFLARE_PUBLIC_ACCESS_TOKEN=${LOGFLARE_PUBLIC_ACCESS_TOKEN:-8NfbHaslrNnz9cLFa9ItO03y9Q0btpB1}
command:
- --config
- /etc/vector/vector.yml
security_opt:
- label=disable
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
supavisor:
image: supabase/supavisor:2.5.7
restart: unless-stopped
expose:
- ${POSTGRES_PORT:-5432}
- ${POOLER_PROXY_PORT_TRANSACTION:-6543}
volumes:
- /home/trav/supabase/docker/volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
healthcheck:
test:
- CMD
- curl
- -sSfL
- --head
- -o
- /dev/null
- http://127.0.0.1:4000/api/health
interval: 10s
timeout: 5s
retries: 5
depends_on:
- db
- analytics
environment:
- PORT=4000
- POSTGRES_PORT=${POSTGRES_PORT:-5432}
- POSTGRES_DB=${POSTGRES_DB:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-squirtle123456}
- DATABASE_URL=ecto://supabase_admin:${POSTGRES_PASSWORD:-squirtle123456}@db:5432/_supabase
- CLUSTER_POSTGRES=true
- SECRET_KEY_BASE=${SECRET_KEY_BASE:-UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq}
- VAULT_ENC_KEY=${VAULT_ENC_KEY:-1duecZi6cWfjxore5jbwee8kF1At8p4G}
- API_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- METRICS_JWT_SECRET=${JWT_SECRET:-UhauiNFrEjOL4j0QfFyfSI3NrRyvvl24}
- REGION=local
- ERL_AFLAGS=-proto_dist inet_tcp
- POOLER_TENANT_ID=${POOLER_TENANT_ID:-your-tenant-id}
- POOLER_DEFAULT_POOL_SIZE=${POOLER_DEFAULT_POOL_SIZE:-20}
- POOLER_MAX_CLIENT_CONN=${POOLER_MAX_CLIENT_CONN:-100}
- POOLER_POOL_MODE=transaction
- DB_POOL_SIZE=${POOLER_DB_POOL_SIZE:-5}
command:
- /bin/sh
- -c
- /app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server
networks:
- supabase
deploy:
replicas: 1
placement:
constraints:
- node.hostname == little
restart_policy:
condition: any
delay: 5s
max_attempts: 3
volumes:
db-config: null
networks:
supabase:
driver: overlay
attachable: true

View File

@@ -0,0 +1,349 @@
# You can leave this on "local". If you change it to production most console commands will ask for extra confirmation.
# Never set it to "testing".
APP_ENV=production
# Set to true if you want to see debug information in error screens.
APP_DEBUG=false
# This should be your email address.
# If you use Docker or similar, you can set this variable from a file by using SITE_OWNER_FILE
# The variable is used in some errors shown to users who aren't admin.
SITE_OWNER=travis.vas@gmail.com
# The encryption key for your sessions. Keep this very secure.
# Change it to a string of exactly 32 chars or use something like `php artisan key:generate` to generate it.
# If you use Docker or similar, you can set this variable from a file by using APP_KEY_FILE
#
# Avoid the "#" character in your APP_KEY, it may break things.
#
APP_KEY=9b04b3d166e506dc9756aca5b8d82f55
# Firefly III will launch using this language (for new users and unauthenticated visitors)
# For a list of available languages: https://github.com/firefly-iii/firefly-iii/blob/main/config/firefly.php#L123
#
# If text is still in English, remember that not everything may have been translated.
DEFAULT_LANGUAGE=en_US
# The locale defines how numbers are formatted.
# by default this value is the same as whatever the language is.
DEFAULT_LOCALE=equal
# Change this value to your preferred time zone.
# Example: Europe/Amsterdam
# For a list of supported time zones, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=America/New_York
# TRUSTED_PROXIES is a useful variable when using Docker and/or a reverse proxy.
# Set it to ** and reverse proxies work just fine.
TRUSTED_PROXIES=*
# The log channel defines where your log entries go to.
# Several other options exist. You can use 'single' for one big fat error log (not recommended).
# Also available are 'syslog', 'errorlog' and 'stdout' which will log to the system itself.
# A rotating log option is 'daily', creates 5 files that (surprise) rotate.
# A cool option is 'papertrail' for cloud logging
# Default setting 'stack' will log to 'daily' and to 'stdout' at the same time.
LOG_CHANNEL=stack
# Log level. You can set this from least severe to most severe:
# debug, info, notice, warning, error, critical, alert, emergency
# If you set it to debug your logs will grow large, and fast. If you set it to emergency probably
# nothing will get logged, ever.
APP_LOG_LEVEL=notice
# Audit log level.
# The audit log is used to log notable Firefly III events on a separate channel.
# These log entries may contain sensitive financial information.
# The audit log is disabled by default.
#
# To enable it, set AUDIT_LOG_LEVEL to "info"
# To disable it, set AUDIT_LOG_LEVEL to "emergency"
AUDIT_LOG_LEVEL=emergency
#
# If you want, you can redirect the audit logs to another channel.
# Set 'audit_stdout', 'audit_syslog', 'audit_errorlog' to log to the system itself.
# Use audit_daily to log to a rotating file.
# Use audit_papertrail to log to papertrail.
#
# If you do this, the audit logs may be mixed with normal logs because the settings for these channels
# are often the same as the settings for the normal logs.
AUDIT_LOG_CHANNEL=
#
# Used when logging to papertrail:
# Also used when audit logs log to papertrail:
#
PAPERTRAIL_HOST=
PAPERTRAIL_PORT=
# Database credentials. Make sure the database exists. I recommend a dedicated user for Firefly III
# For other database types, please see the FAQ: https://docs.firefly-iii.org/references/faq/install/#i-want-to-use-sqlite
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
# Use "pgsql" for PostgreSQL
# Use "mysql" for MySQL and MariaDB.
# Use "sqlite" for SQLite.
DB_CONNECTION=mysql
DB_HOST=db
DB_PORT=3306
DB_DATABASE=firefly
DB_USERNAME=firefly
DB_PASSWORD=squirtle123456
# leave empty or omit when not using a socket connection
DB_SOCKET=
# MySQL supports SSL. You can configure it here.
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MYSQL_USE_SSL=false
MYSQL_SSL_VERIFY_SERVER_CERT=true
# You need to set at least one of these options
MYSQL_SSL_CAPATH=/etc/ssl/certs/
MYSQL_SSL_CA=
MYSQL_SSL_CERT=
MYSQL_SSL_KEY=
MYSQL_SSL_CIPHER=
# PostgreSQL supports SSL. You can configure it here.
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
PGSQL_SSL_MODE=prefer
PGSQL_SSL_ROOT_CERT=null
PGSQL_SSL_CERT=null
PGSQL_SSL_KEY=null
PGSQL_SSL_CRL_FILE=null
# For postgresql 15 and up, setting this to public will no longer work as expected, because the
# 'public' schema is without grants. This can be worked around by having a super user grant those
# necessary privileges, but in security conscious setups that's not viable.
# You will need to set this to the schema you want to use.
PGSQL_SCHEMA=public
# If you're looking for performance improvements, you could install memcached or redis
CACHE_DRIVER=file
SESSION_DRIVER=file
# If you set either of the options above to 'redis', you might want to update these settings too
# If you use Docker or similar, you can set REDIS_HOST_FILE, REDIS_PASSWORD_FILE or
# REDIS_PORT_FILE to set the value from a file instead of from an environment variable
# can be tcp or unix. http is not supported
REDIS_SCHEME=tcp
# use only when using 'unix' for REDIS_SCHEME. Leave empty otherwise.
REDIS_PATH=
# use only when using 'tcp' or 'http' for REDIS_SCHEME. Leave empty otherwise.
REDIS_HOST=192.168.50.210
REDIS_PORT=6379
# Use only with Redis 6+ with proper ACL set. Leave empty otherwise.
REDIS_USERNAME=
REDIS_PASSWORD=
# always use quotes and make sure redis db "0" and "1" exists. Otherwise change accordingly.
REDIS_DB="8"
REDIS_CACHE_DB="9"
# Cookie settings. Should not be necessary to change these.
# If you use Docker or similar, you can set COOKIE_DOMAIN_FILE to set
# the value from a file instead of from an environment variable
# Setting samesite to "strict" may give you trouble logging in.
COOKIE_PATH="/"
COOKIE_DOMAIN=
COOKIE_SECURE=false
COOKIE_SAMESITE=lax
# If you want Firefly III to email you, update these settings
# For instructions, see: https://docs.firefly-iii.org/how-to/firefly-iii/advanced/notifications/#email
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MAIL_MAILER=log
MAIL_HOST=null
MAIL_PORT=2525
MAIL_FROM=changeme@example.com
MAIL_USERNAME=null
MAIL_PASSWORD=null
MAIL_ENCRYPTION=null
MAIL_SENDMAIL_COMMAND=
#
# If you use self-signed certificates for your SMTP server, you can use the following settings.
#
MAIL_ALLOW_SELF_SIGNED=false
MAIL_VERIFY_PEER=true
MAIL_VERIFY_PEER_NAME=true
# Other mail drivers:
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MAILGUN_DOMAIN=thingswithstuff.io
MAILGUN_SECRET=5092859788e1304d7b28901fe41bd6a7-51afd2db-b4b55a79
# If you are on EU region in mailgun, use api.eu.mailgun.net, otherwise use api.mailgun.net
# If you use Docker or similar, you can set this variable from a file by appending it with _FILE
MAILGUN_ENDPOINT=api.mailgun.net
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MANDRILL_SECRET=
SPARKPOST_SECRET=
MAILERSEND_API_KEY=
# Firefly III can send you the following messages.
SEND_ERROR_MESSAGE=true
# These messages contain (sensitive) transaction information:
SEND_REPORT_JOURNALS=true
# Set this value to true if you want to set the location of certain things, like transactions.
# Since this involves an external service, it's optional and disabled by default.
ENABLE_EXTERNAL_MAP=false
#
# Enable or disable exchange rate conversion.
#
ENABLE_EXCHANGE_RATES=false
# Set this value to true if you want Firefly III to download currency exchange rates
# from the internet. These rates are hosted by the creator of Firefly III inside
# an Azure Storage Container.
# Not all currencies may be available. Rates may be wrong.
ENABLE_EXTERNAL_RATES=false
# The map will default to this location:
MAP_DEFAULT_LAT=51.983333
MAP_DEFAULT_LONG=5.916667
MAP_DEFAULT_ZOOM=6
#
# Some objects have room for an URL, like transactions and webhooks.
# By default, the following protocols are allowed:
# http, https, ftp, ftps, mailto
#
# To change this, set your preferred comma separated set below.
# Be sure to include http, https and other default ones if you need to.
#
VALID_URL_PROTOCOLS=
#
# Firefly III authentication settings
#
#
# Firefly III supports a few authentication methods:
# - 'web' (default, uses built in DB)
# - 'remote_user_guard' for Authelia etc
# Read more about these settings in the documentation.
# https://docs.firefly-iii.org/how-to/firefly-iii/advanced/authentication/
#
# LDAP is no longer supported :(
#
AUTHENTICATION_GUARD=web
#
# Remote user guard settings
#
AUTHENTICATION_GUARD_HEADER=REMOTE_USER
AUTHENTICATION_GUARD_EMAIL=
#
# Firefly III generates a basic keypair for your OAuth tokens.
# If you want, you can overrule the key with your own (secure) value.
# It's also possible to set PASSPORT_PUBLIC_KEY_FILE or PASSPORT_PRIVATE_KEY_FILE
# if you're using Docker secrets or similar solutions for secret management
#
PASSPORT_PRIVATE_KEY=
PASSPORT_PUBLIC_KEY=
#
# Extra authentication settings
#
CUSTOM_LOGOUT_URL=
# You can disable the X-Frame-Options header if it interferes with tools like
# Organizr. This is at your own risk. Applications running in frames run the risk
# of leaking information to their parent frame.
DISABLE_FRAME_HEADER=false
# You can disable the Content Security Policy header when you're using an ancient browser
# or any version of Microsoft Edge / Internet Explorer (which amounts to the same thing really)
# This leaves you with the risk of not being able to stop XSS bugs should they ever surface.
# This is at your own risk.
DISABLE_CSP_HEADER=false
# If you wish to track your own behavior over Firefly III, set valid analytics tracker information here.
# Nobody uses this except for me on the demo site. But hey, feel free to use this if you want to.
# Do not prepend the TRACKER_URL with http:// or https://
# The only tracker supported is Matomo.
# You can set the following variables from a file by appending them with _FILE:
TRACKER_SITE_ID=
TRACKER_URL=
#
# You can automatically submit errors to the Firefly III developer using sentry.io
#
# This is entirely optional of course. If you run into errors, I will gladly accept GitHub
# issues or a forwarded email message.
#
# If you set this to true, your installation will try to contact sentry.io when it runs into errors.
#
REPORT_ERRORS_ONLINE=false
#
# Firefly III supports webhooks. These are security sensitive and must be enabled manually first.
#
ALLOW_WEBHOOKS=false
#
# The static cron job token can be useful when you use Docker and wish to manage cron jobs.
# 1. Set this token to any 32-character value (this is important!).
# 2. Use this token in the cron URL instead of a user's command line token that you can find in /profile
#
# For more info: https://docs.firefly-iii.org/how-to/firefly-iii/advanced/cron/
#
# You can set this variable from a file by appending it with _FILE
#
STATIC_CRON_TOKEN=c6b72d0bcfac1fb9939b57511cbaf4d8
# You can fine tune the start-up of a Docker container by editing these environment variables.
# Use this at your own risk. Disabling certain checks and features may result in lots of inconsistent data.
# However if you know what you're doing you can significantly speed up container start times.
# Set each value to true to enable, or false to disable.
# Check if the SQLite database exists. Can be skipped if you're not using SQLite.
# Won't significantly speed up things.
DKR_CHECK_SQLITE=true
# Leave the following configuration vars as is.
# Unless you like to tinker and know what you're doing.
APP_NAME=FireflyIII
BROADCAST_DRIVER=log
QUEUE_DRIVER=sync
CACHE_PREFIX=firefly
PUSHER_KEY=
IPINFO_TOKEN=
PUSHER_SECRET=
PUSHER_ID=
DEMO_USERNAME=
DEMO_PASSWORD=
#
# Disable or enable the running balance column data.
# If you enable this, please also run "php artisan firefly-iii:correct-database"
# This will take some time the first run.
#
USE_RUNNING_BALANCE=true
#
# The v2 layout is very experimental. If it breaks you get to keep both parts.
# Be wary of data loss.
#
FIREFLY_III_LAYOUT=v1
#
# Which Query Parser implementation to use for the search engine and rules
# 'new' is experimental, 'legacy' is the classic one
#
QUERY_PARSER_IMPLEMENTATION=new
#
# Please make sure this URL matches the external URL of your Firefly III installation.
# It is used to validate specific requests and to generate URLs in emails.
#
APP_URL=http://fireflies.lab

View File

@@ -0,0 +1,77 @@
version: "3.8"

services:
  postgresql:
    image: postgres:16
    env_file:
      - stack.env
    environment:
      POSTGRES_USER: ${POSTGRESQL_USER:-zipline}
      # Fixed typo in the required-variable message (was "POSTGRESSQL_PASSWORD",
      # which would print a misspelled variable name when the stack fails to
      # deploy).
      POSTGRES_PASSWORD: ${POSTGRESQL_PASSWORD:?POSTGRESQL_PASSWORD is required}
      POSTGRES_DB: ${POSTGRESQL_DB:-zipline}
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      # Check readiness as the configured user rather than the hardcoded
      # "zipline" so the check stays correct when POSTGRESQL_USER is overridden
      # (Compose interpolates this at parse time).
      test: ["CMD", "pg_isready", "-U", "${POSTGRESQL_USER:-zipline}"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - net
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname == crackbox
      restart_policy:
        condition: any
        delay: 5s
        max_attempts: 3

  zipline:
    image: ghcr.io/diced/zipline:latest
    ports:
      - target: 3000
        published: 3000
        protocol: tcp
        mode: ingress
    # NOTE(review): this service reads ".env" while postgresql reads
    # "stack.env" — presumably both should use the same file; confirm which
    # one the deployment tooling (e.g. Portainer) actually provides.
    env_file:
      - .env
    environment:
      - DATABASE_URL=postgres://${POSTGRESQL_USER:-zipline}:${POSTGRESQL_PASSWORD}@postgresql:5432/${POSTGRESQL_DB:-zipline}
    depends_on:
      # Start order only; swarm does not wait on the database healthcheck.
      - postgresql
    volumes:
      - zipline_uploads:/zipline/uploads
      - zipline_public:/zipline/public
      - zipline_themes:/zipline/themes
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:3000/api/healthcheck"]
      interval: 15s
      timeout: 2s
      retries: 2
    networks:
      - net
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname == crackbox
      restart_policy:
        condition: any
        delay: 5s
        max_attempts: 3

volumes:
  pgdata:
  zipline_uploads:
  zipline_public:
  zipline_themes:

networks:
  net:
    driver: overlay
    attachable: true
  # NOTE(review): declared but not attached to any service above — kept for
  # backward compatibility; confirm whether it can be removed.
  badge-net:
    external: true

View File

@@ -9,7 +9,7 @@ services:
- "8080:8080"
networks:
- public
- badge-net
# - badge-net
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/home/trav/portainer/traefik:/etc/traefik:rw"
@@ -30,8 +30,8 @@ services:
AGENT_SECRET: portainer_agent_secret_key_2024
AGENT_PORT: 9001
LOG_LEVEL: WARN
ports:
- "9001:9001"
# ports:
# - "9001:9001"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
@@ -48,16 +48,16 @@ services:
portainer:
image: portainer/portainer-ee:lts
command: -H tcp://tasks.agent:9001 --tlsskipverify
ports:
- "9443:9443"
- "9000:9000"
- "8000:8000"
# ports:
# - "9443:9443"
# - "9000:9000"
# - "8000:8000"
volumes:
- portainer_data:/data
environment:
AGENT_SECRET: portainer_agent_secret_key_2024
networks:
- public
# - public
- portainer_agent_network
- badge-net
deploy:
@@ -66,17 +66,17 @@ services:
placement:
constraints: [node.role == manager]
labels:
- "traefik.enable=true"
- "traefik.swarm.network=public"
- "traefik.http.routers.portainer.rule=Host(`dock.toy`)"
- "traefik.http.routers.portainer.entrypoints=web"
- "traefik.http.services.portainer.loadbalancer.server.port=9000"
- "traefik.http.routers.portainer.service=portainer"
# Edge
- "traefik.http.routers.edge.rule=Host(`edge.toy`)"
- "traefik.http.routers.edge.entrypoints=web"
- "traefik.http.services.edge.loadbalancer.server.port=8000"
- "traefik.http.routers.edge.service=edge"
- "traefik.enable=false"
# - "traefik.swarm.network=public"
# - "traefik.http.routers.portainer.rule=Host(`dock.toy`)"
# - "traefik.http.routers.portainer.entrypoints=web"
# - "traefik.http.services.portainer.loadbalancer.server.port=9000"
# - "traefik.http.routers.portainer.service=portainer"
# # Edge
# - "traefik.http.routers.edge.rule=Host(`edge.toy`)"
# - "traefik.http.routers.edge.entrypoints=web"
# - "traefik.http.services.edge.loadbalancer.server.port=8000"
# - "traefik.http.routers.edge.service=edge"
networks:
public:

81
heal_portainer_agents.sh Executable file
View File

@@ -0,0 +1,81 @@
#!/usr/bin/env bash
#
# Heal Portainer agents: for every host listed in the hosts file, ensure the
# "portainer_agent" container exists and is running, (re)creating it when
# necessary. Hosts are reached over ssh; blank lines and '#' comments in the
# hosts file are skipped. Exits non-zero only if the hosts file is missing —
# a failure on one host does not stop healing of the remaining hosts.

set -u

# Configuration
HOSTS_FILE="$HOME/portainer/agent_hosts.txt"

if [[ ! -f "$HOSTS_FILE" ]]; then
  echo "Hosts file not found: $HOSTS_FILE" >&2
  exit 1
fi

# `|| [[ -n "$HOST" ]]` keeps a final line that lacks a trailing newline.
while read -r HOST || [[ -n "$HOST" ]]; do
  # Skip blank lines and comments.
  [[ -z "$HOST" || "$HOST" =~ ^# ]] && continue

  echo "==> Checking host: $HOST"

  # The heredoc delimiter is quoted, so nothing below is expanded locally —
  # the remote shell receives the script verbatim. Because of that, the '$'
  # signs must NOT be backslash-escaped: a quoted heredoc preserves the
  # backslash, and the remote shell would then treat "\$VAR" as a literal
  # dollar sign instead of expanding the variable (this broke the previous
  # version's `docker run` and exit-code check).
  ssh "$HOST" bash -s << 'REMOTE_EOF' || echo "==> Healing failed on host: $HOST" >&2
AGENT_CONTAINER_NAME="portainer_agent"
AGENT_IMAGE="portainer/agent:latest"

# Echo with a hostname prefix so interleaved output stays attributable.
log() {
  echo "[$(hostname)] $*"
}

if ! command -v docker &>/dev/null; then
  log "docker not installed or not in PATH"
  exit 1
fi

# Is the container present at all (running or stopped)?
if docker ps -a --format '{{.Names}}' | grep -wq "$AGENT_CONTAINER_NAME"; then
  # Present — is it currently running?
  if docker ps --format '{{.Names}}' | grep -wq "$AGENT_CONTAINER_NAME"; then
    log "Agent container '$AGENT_CONTAINER_NAME' is already running"
    exit 0
  else
    log "Agent container exists but is stopped, starting..."
    if docker start "$AGENT_CONTAINER_NAME"; then
      log "Agent container started"
      exit 0
    else
      log "Failed to start agent container, will try to recreate"
      docker rm -f "$AGENT_CONTAINER_NAME" || true
    fi
  fi
else
  log "Agent container '$AGENT_CONTAINER_NAME' not found, creating..."
fi

# (Re)create the container with the same options used everywhere else.
log "Running new agent container..."
docker run -d \
  --name "$AGENT_CONTAINER_NAME" \
  --restart=always \
  -p 9001:9001 \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /var/lib/docker/volumes:/var/lib/docker/volumes \
  "$AGENT_IMAGE"
RET=$?
if [[ $RET -eq 0 ]]; then
  log "Agent container created and started"
else
  log "Failed to create agent container (exit $RET)"
fi
REMOTE_EOF
done < "$HOSTS_FILE"