# NOTE(review): the lines below are web-page chrome (CI badges + file metadata)
# captured by copy-paste from the Gitea file view — they are not part of the
# compose file. Preserved here as comments; safe to delete.
# All checks were successful
# CI / Backend (push) Successful in 43s
# CI / UI (push) Successful in 1m10s
# Release / Check ui (push) Successful in 26s
# Release / Test backend (push) Successful in 41s
# CI / Backend (pull_request) Successful in 25s
# Release / Docker / caddy (push) Successful in 47s
# CI / UI (pull_request) Successful in 41s
# Release / Docker / ui (push) Successful in 2m32s
# Release / Docker / backend (push) Successful in 3m57s
# Release / Docker / runner (push) Successful in 4m8s
# Release / Gitea Release (push) Successful in 12s
# (page metadata: 449 lines, 17 KiB, YAML)
# ── Shared environment fragments ──────────────────────────────────────────────
# These YAML anchors eliminate duplication between backend and runner.
# All values come from Doppler — no fallbacks needed here.
# Run commands via: just up / just build / etc. (see justfile)
x-infra-env: &infra-env
  # MinIO
  MINIO_ENDPOINT: "minio:9000"
  MINIO_ACCESS_KEY: "${MINIO_ROOT_USER}"
  MINIO_SECRET_KEY: "${MINIO_ROOT_PASSWORD}"
  MINIO_USE_SSL: "false"
  MINIO_PUBLIC_ENDPOINT: "${MINIO_PUBLIC_ENDPOINT}"
  MINIO_PUBLIC_USE_SSL: "${MINIO_PUBLIC_USE_SSL}"
  # PocketBase
  POCKETBASE_URL: "http://pocketbase:8090"
  POCKETBASE_ADMIN_EMAIL: "${POCKETBASE_ADMIN_EMAIL}"
  POCKETBASE_ADMIN_PASSWORD: "${POCKETBASE_ADMIN_PASSWORD}"
  # Meilisearch
  MEILI_URL: "${MEILI_URL:-http://meilisearch:7700}"
  MEILI_API_KEY: "${MEILI_MASTER_KEY}"
  # Valkey
  VALKEY_ADDR: "valkey:6379"

services:
  # ─── MinIO (object storage: chapters, audio, avatars, browse) ────────────────
  minio:
    image: minio/minio:latest
    restart: unless-stopped
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: "${MINIO_ROOT_USER}"
      MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}"
    # No public port — all presigned URL traffic goes through backend or a
    # separately-exposed MINIO_PUBLIC_ENDPOINT (e.g. storage.libnovel.cc).
    expose:
      - "9000"
      - "9001"
    volumes:
      - minio_data:/data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 10s
      timeout: 5s
      retries: 5

# ─── MinIO bucket initialisation ─────────────────────────────────────────────
|
||
minio-init:
|
||
image: minio/mc:latest
|
||
depends_on:
|
||
minio:
|
||
condition: service_healthy
|
||
entrypoint: >
|
||
/bin/sh -c "
|
||
mc alias set local http://minio:9000 $${MINIO_ROOT_USER} $${MINIO_ROOT_PASSWORD};
|
||
mc mb --ignore-existing local/chapters;
|
||
mc mb --ignore-existing local/audio;
|
||
mc mb --ignore-existing local/avatars;
|
||
mc mb --ignore-existing local/catalogue;
|
||
echo 'buckets ready';
|
||
"
|
||
environment:
|
||
MINIO_ROOT_USER: "${MINIO_ROOT_USER}"
|
||
MINIO_ROOT_PASSWORD: "${MINIO_ROOT_PASSWORD}"
|
||
|
||
# ─── PocketBase (auth + structured data) ─────────────────────────────────────
|
||
pocketbase:
|
||
image: ghcr.io/muchobien/pocketbase:latest
|
||
restart: unless-stopped
|
||
environment:
|
||
PB_ADMIN_EMAIL: "${POCKETBASE_ADMIN_EMAIL}"
|
||
PB_ADMIN_PASSWORD: "${POCKETBASE_ADMIN_PASSWORD}"
|
||
# No public port — accessed only by backend/runner on the internal network.
|
||
expose:
|
||
- "8090"
|
||
volumes:
|
||
- pb_data:/pb_data
|
||
healthcheck:
|
||
test: ["CMD", "wget", "-qO-", "http://localhost:8090/api/health"]
|
||
interval: 10s
|
||
timeout: 5s
|
||
retries: 5
|
||
|
||
# ─── PocketBase collection bootstrap ─────────────────────────────────────────
|
||
pb-init:
|
||
image: alpine:3.19
|
||
depends_on:
|
||
pocketbase:
|
||
condition: service_healthy
|
||
environment:
|
||
POCKETBASE_URL: "http://pocketbase:8090"
|
||
POCKETBASE_ADMIN_EMAIL: "${POCKETBASE_ADMIN_EMAIL}"
|
||
POCKETBASE_ADMIN_PASSWORD: "${POCKETBASE_ADMIN_PASSWORD}"
|
||
volumes:
|
||
- ./scripts/pb-init-v3.sh:/pb-init.sh:ro
|
||
entrypoint: ["sh", "/pb-init.sh"]
|
||
|
||
# ─── Meilisearch (full-text search) ──────────────────────────────────────────
|
||
meilisearch:
|
||
image: getmeili/meilisearch:latest
|
||
restart: unless-stopped
|
||
environment:
|
||
MEILI_MASTER_KEY: "${MEILI_MASTER_KEY}"
|
||
MEILI_ENV: "${MEILI_ENV}"
|
||
# No public port — backend/runner reach it via internal network.
|
||
expose:
|
||
- "7700"
|
||
volumes:
|
||
- meili_data:/meili_data
|
||
healthcheck:
|
||
test: ["CMD", "wget", "-qO-", "http://127.0.0.1:7700/health"]
|
||
interval: 10s
|
||
timeout: 5s
|
||
retries: 5
|
||
|
||
# ─── Valkey (presign URL cache) ───────────────────────────────────────────────
|
||
valkey:
|
||
image: valkey/valkey:7-alpine
|
||
restart: unless-stopped
|
||
# No public port — backend/runner/ui reach it via internal network.
|
||
expose:
|
||
- "6379"
|
||
volumes:
|
||
- valkey_data:/data
|
||
healthcheck:
|
||
test: ["CMD", "valkey-cli", "ping"]
|
||
interval: 10s
|
||
timeout: 5s
|
||
retries: 5
|
||
|
||
# ─── Redis (Asynq task queue — accessed locally by backend, remotely by homelab runner) ──
|
||
redis:
|
||
image: redis:7-alpine
|
||
restart: unless-stopped
|
||
command: >
|
||
redis-server
|
||
--appendonly yes
|
||
--requirepass "${REDIS_PASSWORD}"
|
||
# No public port — backend reaches it via internal network.
|
||
# Homelab runner reaches it via Caddy TLS proxy on :6380 → redis:6379.
|
||
expose:
|
||
- "6379"
|
||
volumes:
|
||
- redis_data:/data
|
||
healthcheck:
|
||
test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"]
|
||
interval: 10s
|
||
timeout: 5s
|
||
retries: 5
|
||
|
||
# ─── Backend API ──────────────────────────────────────────────────────────────
|
||
backend:
|
||
image: kalekber/libnovel-backend:${GIT_TAG:-latest}
|
||
build:
|
||
context: ./backend
|
||
dockerfile: Dockerfile
|
||
target: backend
|
||
args:
|
||
VERSION: "${GIT_TAG}"
|
||
COMMIT: "${GIT_COMMIT}"
|
||
labels:
|
||
com.centurylinklabs.watchtower.enable: "true"
|
||
restart: unless-stopped
|
||
stop_grace_period: 35s
|
||
depends_on:
|
||
pb-init:
|
||
condition: service_completed_successfully
|
||
pocketbase:
|
||
condition: service_healthy
|
||
minio:
|
||
condition: service_healthy
|
||
meilisearch:
|
||
condition: service_healthy
|
||
valkey:
|
||
condition: service_healthy
|
||
redis:
|
||
condition: service_healthy
|
||
# No public port — all traffic is routed via Caddy.
|
||
expose:
|
||
- "8080"
|
||
environment:
|
||
<<: *infra-env
|
||
BACKEND_HTTP_ADDR: ":8080"
|
||
LOG_LEVEL: "${LOG_LEVEL}"
|
||
KOKORO_URL: "${KOKORO_URL}"
|
||
KOKORO_VOICE: "${KOKORO_VOICE}"
|
||
POCKET_TTS_URL: "${POCKET_TTS_URL}"
|
||
GLITCHTIP_DSN: "${GLITCHTIP_DSN}"
|
||
OTEL_EXPORTER_OTLP_ENDPOINT: "${OTEL_EXPORTER_OTLP_ENDPOINT}"
|
||
OTEL_SERVICE_NAME: "backend"
|
||
# Asynq task queue — backend enqueues jobs to local Redis sidecar.
|
||
# Homelab runner connects to the same Redis via Caddy TLS proxy on :6380.
|
||
REDIS_ADDR: "redis:6379"
|
||
REDIS_PASSWORD: "${REDIS_PASSWORD}"
|
||
healthcheck:
|
||
test: ["CMD", "/healthcheck", "http://localhost:8080/health"]
|
||
interval: 15s
|
||
timeout: 5s
|
||
retries: 3
|
||
|
||
# ─── Runner (background task worker) ─────────────────────────────────────────
|
||
# profiles: [runner] prevents accidental restart on `docker compose up -d`.
|
||
# The homelab runner (192.168.0.109) is the sole worker in production.
|
||
# To start explicitly: doppler run -- docker compose --profile runner up -d runner
|
||
runner:
|
||
profiles: [runner]
|
||
image: kalekber/libnovel-runner:${GIT_TAG:-latest}
|
||
build:
|
||
context: ./backend
|
||
dockerfile: Dockerfile
|
||
target: runner
|
||
args:
|
||
VERSION: "${GIT_TAG}"
|
||
COMMIT: "${GIT_COMMIT}"
|
||
labels:
|
||
com.centurylinklabs.watchtower.enable: "true"
|
||
restart: unless-stopped
|
||
stop_grace_period: 135s
|
||
depends_on:
|
||
pb-init:
|
||
condition: service_completed_successfully
|
||
pocketbase:
|
||
condition: service_healthy
|
||
minio:
|
||
condition: service_healthy
|
||
meilisearch:
|
||
condition: service_healthy
|
||
valkey:
|
||
condition: service_healthy
|
||
# Metrics endpoint — internal only; expose publicly via Caddy if needed.
|
||
expose:
|
||
- "9091"
|
||
environment:
|
||
<<: *infra-env
|
||
LOG_LEVEL: "${LOG_LEVEL}"
|
||
# Runner tuning
|
||
RUNNER_POLL_INTERVAL: "${RUNNER_POLL_INTERVAL}"
|
||
RUNNER_MAX_CONCURRENT_SCRAPE: "${RUNNER_MAX_CONCURRENT_SCRAPE}"
|
||
RUNNER_MAX_CONCURRENT_AUDIO: "${RUNNER_MAX_CONCURRENT_AUDIO}"
|
||
RUNNER_WORKER_ID: "${RUNNER_WORKER_ID}"
|
||
RUNNER_TIMEOUT: "${RUNNER_TIMEOUT}"
|
||
RUNNER_METRICS_ADDR: "${RUNNER_METRICS_ADDR}"
|
||
# Suppress the on-startup catalogue walk — catalogue_refresh now skips
|
||
# books already in Meilisearch, so a full walk on every restart is wasteful.
|
||
# The 24h periodic ticker (CatalogueRefreshInterval) still fires normally.
|
||
RUNNER_SKIP_INITIAL_CATALOGUE_REFRESH: "true"
|
||
# Kokoro-FastAPI TTS endpoint
|
||
KOKORO_URL: "${KOKORO_URL}"
|
||
KOKORO_VOICE: "${KOKORO_VOICE}"
|
||
POCKET_TTS_URL: "${POCKET_TTS_URL}"
|
||
GLITCHTIP_DSN: "${GLITCHTIP_DSN}"
|
||
OTEL_EXPORTER_OTLP_ENDPOINT: "${OTEL_EXPORTER_OTLP_ENDPOINT}"
|
||
OTEL_SERVICE_NAME: "runner"
|
||
healthcheck:
|
||
# 120s = 2× the default 30s poll interval with generous headroom.
|
||
test: ["CMD", "/healthcheck", "file", "/tmp/runner.alive", "120"]
|
||
interval: 60s
|
||
timeout: 5s
|
||
retries: 3
|
||
|
||
# ─── SvelteKit UI ─────────────────────────────────────────────────────────────
|
||
ui:
|
||
image: kalekber/libnovel-ui:${GIT_TAG:-latest}
|
||
build:
|
||
context: ./ui
|
||
dockerfile: Dockerfile
|
||
args:
|
||
BUILD_VERSION: "${GIT_TAG}"
|
||
BUILD_COMMIT: "${GIT_COMMIT}"
|
||
labels:
|
||
com.centurylinklabs.watchtower.enable: "true"
|
||
restart: unless-stopped
|
||
stop_grace_period: 35s
|
||
depends_on:
|
||
pb-init:
|
||
condition: service_completed_successfully
|
||
backend:
|
||
condition: service_healthy
|
||
pocketbase:
|
||
condition: service_healthy
|
||
valkey:
|
||
condition: service_healthy
|
||
# No public port — all traffic via Caddy.
|
||
expose:
|
||
- "3000"
|
||
environment:
|
||
# ORIGIN must match the public URL Caddy serves on.
|
||
# adapter-node uses this for SvelteKit's built-in CSRF origin check.
|
||
ORIGIN: "${ORIGIN}"
|
||
BACKEND_API_URL: "http://backend:8080"
|
||
POCKETBASE_URL: "http://pocketbase:8090"
|
||
POCKETBASE_ADMIN_EMAIL: "${POCKETBASE_ADMIN_EMAIL}"
|
||
POCKETBASE_ADMIN_PASSWORD: "${POCKETBASE_ADMIN_PASSWORD}"
|
||
AUTH_SECRET: "${AUTH_SECRET}"
|
||
DEBUG_LOGIN_TOKEN: "${DEBUG_LOGIN_TOKEN}"
|
||
PUBLIC_MINIO_PUBLIC_URL: "${MINIO_PUBLIC_ENDPOINT}"
|
||
# Valkey
|
||
VALKEY_ADDR: "valkey:6379"
|
||
# Umami analytics
|
||
PUBLIC_UMAMI_WEBSITE_ID: "${PUBLIC_UMAMI_WEBSITE_ID}"
|
||
PUBLIC_UMAMI_SCRIPT_URL: "${PUBLIC_UMAMI_SCRIPT_URL}"
|
||
# GlitchTip client + server-side error tracking
|
||
PUBLIC_GLITCHTIP_DSN: "${PUBLIC_GLITCHTIP_DSN}"
|
||
# OpenTelemetry tracing
|
||
OTEL_EXPORTER_OTLP_ENDPOINT: "${OTEL_EXPORTER_OTLP_ENDPOINT}"
|
||
OTEL_SERVICE_NAME: "ui"
|
||
# OAuth2 providers
|
||
GOOGLE_CLIENT_ID: "${GOOGLE_CLIENT_ID}"
|
||
GOOGLE_CLIENT_SECRET: "${GOOGLE_CLIENT_SECRET}"
|
||
GITHUB_CLIENT_ID: "${GITHUB_CLIENT_ID}"
|
||
GITHUB_CLIENT_SECRET: "${GITHUB_CLIENT_SECRET}"
|
||
healthcheck:
|
||
test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/health"]
|
||
interval: 15s
|
||
timeout: 5s
|
||
retries: 3
|
||
|
||
# ─── CrowdSec (threat detection + IP blocking) ───────────────────────────────
|
||
# Reads Caddy JSON access logs from the shared caddy_logs volume and enforces
|
||
# decisions via the Caddy bouncer plugin.
|
||
crowdsec:
|
||
image: crowdsecurity/crowdsec:latest
|
||
restart: unless-stopped
|
||
environment:
|
||
GID: "1000"
|
||
COLLECTIONS: "crowdsecurity/caddy crowdsecurity/http-dos crowdsecurity/base-http-scenarios"
|
||
volumes:
|
||
- crowdsec_data:/var/lib/crowdsec/data
|
||
- ./crowdsec/acquis.yaml:/etc/crowdsec/acquis.yaml:ro
|
||
- caddy_logs:/var/log/caddy:ro
|
||
expose:
|
||
- "8080"
|
||
healthcheck:
|
||
test: ["CMD", "cscli", "version"]
|
||
interval: 20s
|
||
timeout: 10s
|
||
retries: 5
|
||
|
||
# ─── Dozzle agent ────────────────────────────────────────────────────────────
|
||
# Exposes prod container logs to the Dozzle instance on the homelab.
|
||
# The homelab Dozzle connects here via DOZZLE_REMOTE_AGENT.
|
||
# Port 7007 is bound to localhost only — not reachable from the internet.
|
||
dozzle-agent:
|
||
image: amir20/dozzle:latest
|
||
restart: unless-stopped
|
||
command: agent
|
||
volumes:
|
||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||
ports:
|
||
- "127.0.0.1:7007:7007"
|
||
|
||
# ─── CrowdSec bouncer registration ───────────────────────────────────────────
|
||
# One-shot: registers the Caddy bouncer with the CrowdSec LAPI and writes the
|
||
# generated API key to crowdsec/.crowdsec.env, which Caddy reads via env_file.
|
||
# Uses the Docker socket to exec cscli inside the running crowdsec container.
|
||
crowdsec-init:
|
||
image: docker:cli
|
||
depends_on:
|
||
crowdsec:
|
||
condition: service_healthy
|
||
volumes:
|
||
- /var/run/docker.sock:/var/run/docker.sock
|
||
- ./crowdsec:/crowdsec-out
|
||
entrypoint: >
|
||
/bin/sh -c "
|
||
out=/crowdsec-out/.crowdsec.env;
|
||
existing=$$(grep -s '^CROWDSEC_API_KEY=.' \"$$out\" | cut -d= -f2-);
|
||
if [ -n \"$$existing\" ]; then
|
||
echo 'crowdsec-init: key already present, skipping registration';
|
||
exit 0;
|
||
fi;
|
||
container=$$(docker ps --filter name=crowdsec --filter status=running --format '{{.Names}}' | grep -v init | head -1);
|
||
echo \"crowdsec-init: using container $$container\";
|
||
docker exec $$container cscli bouncers delete caddy-bouncer 2>/dev/null || true;
|
||
key=$$(docker exec $$container cscli bouncers add caddy-bouncer -o raw 2>&1);
|
||
if [ -z \"$$key\" ]; then
|
||
echo 'crowdsec-init: ERROR — failed to obtain bouncer key' >&2;
|
||
exit 1;
|
||
fi;
|
||
printf 'CROWDSEC_API_KEY=%s\n' \"$$key\" > \"$$out\";
|
||
echo \"crowdsec-init: bouncer key written (key length: $${#key})\";
|
||
"
|
||
restart: "no"
|
||
|
||
|
||
# ─── Caddy (reverse proxy + automatic HTTPS) ──────────────────────────────────
|
||
# Custom build includes github.com/mholt/caddy-ratelimit,
|
||
# github.com/hslatman/caddy-crowdsec-bouncer/http, and
|
||
# github.com/mholt/caddy-l4 (TCP layer4 proxy for Redis).
|
||
caddy:
|
||
image: kalekber/libnovel-caddy:${GIT_TAG:-latest}
|
||
build:
|
||
context: ./caddy
|
||
dockerfile: Dockerfile
|
||
labels:
|
||
com.centurylinklabs.watchtower.enable: "true"
|
||
restart: unless-stopped
|
||
depends_on:
|
||
backend:
|
||
condition: service_healthy
|
||
ui:
|
||
condition: service_healthy
|
||
crowdsec-init:
|
||
condition: service_completed_successfully
|
||
ports:
|
||
- "80:80"
|
||
- "443:443"
|
||
- "443:443/udp" # HTTP/3 (QUIC)
|
||
- "6380:6380" # Redis TCP proxy (TLS) for homelab runner → Asynq
|
||
environment:
|
||
DOMAIN: "${DOMAIN}"
|
||
CADDY_ACME_EMAIL: "${CADDY_ACME_EMAIL}"
|
||
env_file:
|
||
- path: ./crowdsec/.crowdsec.env
|
||
required: false
|
||
volumes:
|
||
- ./Caddyfile:/etc/caddy/Caddyfile:ro
|
||
- ./caddy/errors:/srv/errors:ro
|
||
- caddy_data:/data
|
||
- caddy_config:/config
|
||
- caddy_logs:/var/log/caddy
|
||
|
||
# ─── Watchtower (auto-redeploy custom services on new images) ────────────────
|
||
# Only watches services labelled com.centurylinklabs.watchtower.enable=true.
|
||
# Third-party infra images (minio, pocketbase, meilisearch, etc.) are excluded.
|
||
# doppler binary is mounted from the host so watchtower fetches fresh secrets
|
||
# on every start (notification URL, credentials) without baking them in.
|
||
watchtower:
|
||
image: containrrr/watchtower:latest
|
||
restart: unless-stopped
|
||
entrypoint: ["/usr/bin/doppler", "run", "--project", "libnovel", "--config", "prd", "--"]
|
||
command: ["/watchtower", "--label-enable", "--interval", "300", "--cleanup"]
|
||
volumes:
|
||
- /var/run/docker.sock:/var/run/docker.sock
|
||
- /usr/bin/doppler:/usr/bin/doppler:ro
|
||
- /root/.doppler:/root/.doppler:ro
|
||
environment:
|
||
HOME: "/root"
|
||
DOCKER_API_VERSION: "1.44"
|
||
|
||
# Named volumes — data persisted across container recreation.
volumes:
  minio_data:
  pb_data:
  meili_data:
  valkey_data:
  redis_data:
  caddy_data:
  caddy_config:
  caddy_logs:
  crowdsec_data: