2025-11-26 18:06:47 +00:00
parent c59e356684
commit 19857fc94f
5 changed files with 35 additions and 36 deletions

docker/volumes/db/_supabase.sql   0 lines changed   Executable file → Normal file
docker/volumes/db/jwt.sql         0 lines changed   Executable file → Normal file
docker/volumes/db/logs.sql        0 lines changed   Executable file → Normal file
docker/volumes/db/pooler.sql      0 lines changed   Executable file → Normal file
docker/volumes/logs/vector.yml    (file name inferred from the diff content below)

@@ -33,9 +33,9 @@ transforms:
       kong: '.appname == "supabase-kong"'
       auth: '.appname == "supabase-auth"'
       rest: '.appname == "supabase-rest"'
-      realtime: '.appname == "supabase-realtime"'
+      realtime: '.appname == "realtime-dev.supabase-realtime"'
       storage: '.appname == "supabase-storage"'
-      functions: '.appname == "supabase-functions"'
+      functions: '.appname == "supabase-edge-functions"'
       db: '.appname == "supabase-db"'
   # Ignores non nginx errors since they are related with kong booting up
   kong_logs:
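
Note on the two renamed route conditions above: Vector's route transform evaluates each VRL condition against the event's appname field, and an event that matches no condition only leaves through the transform's _unmatched output, so logs from the renamed realtime and edge-functions containers would otherwise be dropped silently instead of reaching their sinks. A minimal Vector config unit test that would catch such drift, sketched under the assumption (supported by the sink inputs below) that the transform is named router; the test name and sample fields are illustrative, not part of this commit:

tests:
  - name: realtime routes by appname
    inputs:
      - insert_at: router
        type: log
        log_fields:
          appname: 'realtime-dev.supabase-realtime'
          event_message: 'probe'
    outputs:
      - extract_from: router.realtime
        conditions:
          - type: vrl
            source: '.appname == "realtime-dev.supabase-realtime"'

Running vector test against the config executes these blocks without starting the pipeline.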
@@ -101,24 +101,7 @@ transforms:
       parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
       if err == null {
         .event_message = parsed.msg
-        ts, ts_err = parse_timestamp(parsed.time, format: "%+")
-        if ts_err != null {
-          ts, ts_err = parse_timestamp(parsed.time, format: "%Y-%m-%d %H:%M:%S%.f%:z")
-        }
-        if ts_err != null {
-          ts, ts_err = parse_timestamp(parsed.time, format: "%Y-%m-%d %H:%M:%S%.f%z")
-        }
-        if ts_err != null {
-          ts, ts_err = parse_timestamp(parsed.time, format: "%Y-%m-%d %H:%M:%S%z")
-        }
-        if ts_err != null {
-          ts, ts_err = parse_timestamp(parsed.time, format: "%Y-%m-%d %H:%M:%S")
-        }
-        if ts_err == null {
-          .timestamp = ts
-        } else {
-          .timestamp = parsed.time
-        }
+        .timestamp = to_timestamp!(parsed.time)
         .metadata.host = .project
       }
   # Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
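
The hunk above collapses a five-format parse_timestamp cascade into a single to_timestamp!. One behavioral difference is worth flagging: the removed code fell back to storing the raw string when every format failed, whereas the infallibly-marked to_timestamp! aborts the remap program for that event on input it cannot coerce. A VRL sketch (illustrative, not part of the commit) that keeps the single call but preserves the old fallback:

ts, ts_err = to_timestamp(parsed.time)   # the fallible form returns a value/error pair
if ts_err == null {
  .timestamp = ts
} else {
  .timestamp = parsed.time               # keep the raw value, as the removed branch did
}

Note too that some newer Vector releases deprecate to_timestamp in favor of parse_timestamp and the from_unix_timestamp family, so the one-liner ties this config to the Vector image in use.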
@@ -134,6 +117,13 @@ transforms:
         .event_message = parsed.msg
         .metadata.level = parsed.level
       }
+  # Function logs are unstructured messages on stderr
+  functions_logs:
+    type: remap
+    inputs:
+      - router.functions
+    source: |-
+      .metadata.project_ref = del(.project)
   # Storage logs may contain json objects so we parse them for completeness
   storage_logs:
     type: remap
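
On the new functions_logs transform: VRL's del() removes a field and returns the value it held, so the single assignment is a move rather than a copy; after it runs, .project is gone and its value lives at .metadata.project_ref. The equivalent two-step spelling, for clarity only:

.metadata.project_ref = .project
del(.project)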
@@ -182,8 +172,9 @@ sinks:
     method: 'post'
     request:
       retry_max_duration_secs: 10
-      timeout_secs: 30
-    uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
+    headers:
+      x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
+    uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod'
   logflare_realtime:
     type: 'http'
     inputs:
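
This sink edit, repeated for each Logflare sink below, moves the access token from the uri query string into an x-api-key request header, so the secret no longer leaks into proxy access logs or error output that echoes the URI; ${VAR?message} is Vector's required-environment-variable interpolation, unchanged here. One version caveat: recent Vector releases nest HTTP sink headers under request.headers instead of a sink-level headers key, so on a newer image the same change would read roughly as follows (a sketch, not what this commit ships):

    request:
      retry_max_duration_secs: 10
      headers:
        x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
    uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod'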
@@ -193,8 +184,9 @@ sinks:
     method: 'post'
     request:
       retry_max_duration_secs: 10
-      timeout_secs: 30
-    uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
+    headers:
+      x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
+    uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod'
   logflare_rest:
     type: 'http'
     inputs:
@@ -204,8 +196,9 @@ sinks:
     method: 'post'
     request:
       retry_max_duration_secs: 10
-      timeout_secs: 30
-    uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
+    headers:
+      x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
+    uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod'
   logflare_db:
     type: 'http'
     inputs:
@@ -215,20 +208,24 @@ sinks:
     method: 'post'
     request:
       retry_max_duration_secs: 10
-      timeout_secs: 30
-    # Send directly to analytics to avoid kong restarts dropping log ingestion
-    uri: 'http://analytics:4000/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
+    headers:
+      x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
+    # We must route the sink through kong because ingesting logs before logflare is fully initialised will
+    # lead to broken queries from studio. This works by the assumption that containers are started in the
+    # following order: vector > db > logflare > kong
+    uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs'
   logflare_functions:
     type: 'http'
     inputs:
-      - router.functions
+      - functions_logs
     encoding:
       codec: 'json'
     method: 'post'
     request:
       retry_max_duration_secs: 10
-      timeout_secs: 30
-    uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
+    headers:
+      x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
+    uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs'
   logflare_storage:
     type: 'http'
     inputs:
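
Two things happen in the hunk above besides the header swap: logflare_functions now consumes the new functions_logs transform instead of the raw router output, and database logs are rerouted through kong at /analytics/v1 instead of hitting analytics directly. The added comment encodes the boot-order assumption (vector > db > logflare > kong) in prose only; nothing in this file enforces it. In a compose deployment that ordering is typically pinned with depends_on plus healthchecks, along these lines (service names are assumptions for illustration; the compose file is not part of this diff):

services:
  kong:
    depends_on:
      analytics:            # the logflare container
        condition: service_healthy
  analytics:
    depends_on:
      db:
        condition: service_healthy
  db:
    depends_on:
      vector:
        condition: service_healthy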
@@ -238,8 +235,9 @@ sinks:
     method: 'post'
     request:
       retry_max_duration_secs: 10
-      timeout_secs: 30
-    uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
+    headers:
+      x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
+    uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2'
   logflare_kong:
     type: 'http'
     inputs:
@@ -250,5 +248,6 @@ sinks:
     method: 'post'
     request:
       retry_max_duration_secs: 10
-      timeout_secs: 30
-    uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
+    headers:
+      x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}
+    uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod'
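
A config change of this shape can be sanity-checked before booting the stack: vector validate parses the file, resolves the ${...?} interpolations (so LOGFLARE_PUBLIC_ACCESS_TOKEN must be set, or validation stops with the embedded "is required" message), and checks that every sink input references an existing component, while vector test runs any tests blocks. Assuming the path inferred above:

LOGFLARE_PUBLIC_ACCESS_TOKEN=dummy vector validate docker/volumes/logs/vector.yml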