bunker-admin 5642a24c8f Sync CCP templates with production configs for complete instance provisioning
Closes 12 template drift gaps between the Control Panel templates and
production configs. New instances now provision with full monitoring
(alerts fire properly), the correct Gitea DB type (postgres, not mysql),
social sharing previews (OG meta bot routes), Excalidraw subdomain
routing, docker-socket-proxy for Homepage, and complete Grafana/
Alertmanager/Prometheus config copying.

Key changes:
- Rewrite Prometheus template: add alerting, rule_files, 5 scrape jobs (shape sketched after this list)
- Add cAdvisor, node-exporter, redis-exporter, gotify, docker-socket-proxy
- Fix Gitea env from mysql to postgres to match docker-compose
- Add OG bot detection + rewrite routes for campaigns/pages/gallery
- Add Excalidraw nginx server block + Pangolin draw subdomain
- Add embed port to discovery portConfig + emailTestMode to registration
- Copy alerts.yml, alertmanager.yml, Grafana dashboards to templates
- Add Listmonk proxy port and upgrade volume to API service
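
The rewritten Prometheus template itself is not part of this page; the following is a rough sketch of the shape the first bullet describes. The alertmanager target and exporter ports are standard defaults, the job list is inferred from the exporters defined in the compose template below, and the fifth scrape job is not recoverable from here:

alerting:
  alertmanagers:
    - static_configs:
        - targets: ["{{containerPrefix}}-alertmanager:9093"]
rule_files:
  - /etc/prometheus/alerts.yml
scrape_configs:
  - job_name: prometheus
    static_configs: [{ targets: ["localhost:9090"] }]
  - job_name: cadvisor
    static_configs: [{ targets: ["{{containerPrefix}}-cadvisor:8080"] }]
  - job_name: node-exporter
    static_configs: [{ targets: ["{{containerPrefix}}-node-exporter:9100"] }]
  - job_name: redis-exporter
    static_configs: [{ targets: ["{{containerPrefix}}-redis-exporter:9121"] }]
  # (fifth job not recoverable from this page)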

Bunker Admin
2026-03-05 08:32:49 -07:00

1071 lines
35 KiB
Handlebars

# Changemaker Lite — Instance: {{name}}
# Compose project: {{composeProject}}
# Generated by CCP
services:
# ─── Core Infrastructure ───────────────────────────────────
v2-postgres:
image: postgres:16-alpine
container_name: {{containerPrefix}}-postgres
restart: unless-stopped
environment:
POSTGRES_USER: changemaker
POSTGRES_PASSWORD: "{{secrets.postgresPassword}}"
POSTGRES_DB: changemaker_v2
volumes:
- {{containerPrefix}}-postgres-data:/var/lib/postgresql/data
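# The init scripts mounted below run once, in lexical order (10-, 20-, 30-),
# when the postgres data volume is first created; they provision the
# nocodb_meta, gancio, and gitea databases used by services further down.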
- ./api/prisma/init-nocodb-db.sh:/docker-entrypoint-initdb.d/10-init-nocodb.sh:ro
- ./api/prisma/init-gancio-db.sh:/docker-entrypoint-initdb.d/20-init-gancio.sh:ro
- ./api/prisma/init-gitea-db.sh:/docker-entrypoint-initdb.d/30-init-gitea.sh:ro
ports:
- "127.0.0.1:{{ports.postgres}}:5432"
networks:
- {{networkName}}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U changemaker -d changemaker_v2"]
interval: 10s
timeout: 5s
retries: 5
redis:
image: redis:7-alpine
container_name: {{containerPrefix}}-redis
restart: unless-stopped
command: "redis-server --appendonly yes --maxmemory 512mb --maxmemory-policy noeviction --requirepass {{secrets.redisPassword}}"
volumes:
- {{containerPrefix}}-redis-data:/data
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "redis-cli", "-a", "{{secrets.redisPassword}}", "ping"]
interval: 10s
timeout: 5s
retries: 5
deploy:
resources:
limits:
cpus: '1'
memory: 512M
reservations:
cpus: '0.25'
memory: 256M
logging:
driver: "json-file"
options:
max-size: "5m"
max-file: "2"
# ─── Application Services ──────────────────────────────────
api:
build:
context: ./api
dockerfile: Dockerfile
target: development
container_name: {{containerPrefix}}-api
restart: unless-stopped
depends_on:
v2-postgres:
condition: service_healthy
redis:
condition: service_healthy
env_file: .env
environment:
DATABASE_URL: "postgresql://changemaker:{{secrets.postgresPassword}}@{{containerPrefix}}-postgres:5432/changemaker_v2"
REDIS_URL: "redis://:{{secrets.redisPassword}}@{{containerPrefix}}-redis:6379"
PORT: "4000"
NAR_DATA_DIR: /data
LISTMONK_URL: http://{{containerPrefix}}-listmonk:9000
ADMIN_URL: https://app.{{domain}}
API_URL: https://api.{{domain}}
{{#if enableGancio}}
GANCIO_URL: http://{{containerPrefix}}-gancio:13120
{{/if}}
ENABLE_MEET: "{{#if enableMeet}}true{{else}}false{{/if}}"
ENABLE_SMS: "{{#if enableSms}}true{{else}}false{{/if}}"
ENABLE_SOCIAL: "{{#if enableSocial}}true{{else}}false{{/if}}"
ENABLE_PEOPLE: "{{#if enablePeople}}true{{else}}false{{/if}}"
{{#if enableMeet}}
JITSI_APP_ID: changemaker
JITSI_APP_SECRET: "{{secrets.jitsiAppSecret}}"
JITSI_URL: http://{{containerPrefix}}-jitsi-web:80
{{/if}}
{{#if enableChat}}
ROCKETCHAT_URL: http://{{containerPrefix}}-rocketchat:3000
{{/if}}
ports:
- "{{ports.api}}:4000"
{{#if enableListmonk}}
- "9002:9002"
{{/if}}
volumes:
- ./assets/uploads:/app/uploads
- ./mkdocs:/mkdocs:rw
- ./data:/data:ro
- ./data/upgrade:/app/upgrade:rw
- ./configs:/app/configs:ro
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:4000/api/health"]
interval: 15s
timeout: 5s
retries: 3
start_period: 30s
deploy:
resources:
limits:
cpus: '2'
memory: 1G
reservations:
cpus: '0.25'
memory: 256M
admin:
build:
context: ./admin
target: development
container_name: {{containerPrefix}}-admin
restart: unless-stopped
depends_on:
- api
environment:
DOMAIN: {{domain}}
NODE_ENV: production
VITE_API_URL: http://{{containerPrefix}}-api:4000
VITE_MKDOCS_URL: http://{{containerPrefix}}-mkdocs:8000
VITE_DOMAIN: {{domain}}
VITE_MKDOCS_SITE_PORT: "{{math ports.embed "+" 14}}"
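# Offset 14 in the embed port block corresponds to the "MkDocs site embed
# proxy" mapping under nginx below. "math" is a helper registered by the
# CCP generator; Handlebars has no built-in arithmetic.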
{{#if enableMedia}}
VITE_MEDIA_API_URL: http://{{containerPrefix}}-media-api:4100
{{/if}}
ports:
- "{{ports.admin}}:3000"
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1:3000/"]
interval: 30s
timeout: 5s
retries: 3
start_period: 20s
{{#if enableMedia}}
media-api:
build:
context: ./api
dockerfile: Dockerfile.media
target: development
container_name: {{containerPrefix}}-media-api
restart: unless-stopped
depends_on:
v2-postgres:
condition: service_healthy
redis:
condition: service_healthy
env_file: .env
environment:
DATABASE_URL: "postgresql://changemaker:{{secrets.postgresPassword}}@{{containerPrefix}}-postgres:5432/changemaker_v2"
REDIS_URL: "redis://:{{secrets.redisPassword}}@{{containerPrefix}}-redis:6379"
MEDIA_API_PORT: "4100"
CORS_ORIGINS: https://app.{{domain}},http://localhost:{{ports.admin}}
ENABLE_MEDIA_FEATURES: "true"
MEDIA_ROOT: /media/local
MEDIA_UPLOADS: /media/uploads
volumes:
- ./media:/media:ro
- ./media/local/inbox:/media/local/inbox:rw
- ./media/local/thumbnails:/media/local/thumbnails:rw
- ./media/local/photos:/media/local/photos:rw
- ./media/public:/media/public:rw
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1:4100/health"]
interval: 15s
timeout: 5s
retries: 3
start_period: 30s
deploy:
resources:
limits:
cpus: '2'
memory: 1G
reservations:
cpus: '0.25'
memory: 256M
{{/if}}
# ─── Reverse Proxy ─────────────────────────────────────────
nginx:
image: nginx:alpine
container_name: {{containerPrefix}}-nginx
restart: unless-stopped
depends_on:
- api
- admin
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/conf.d:/etc/nginx/conf.d:ro
ports:
- "{{ports.nginx}}:80"
- "{{math ports.embed "+" 0}}:8881" # NocoDB embed proxy
- "{{math ports.embed "+" 1}}:8882" # n8n embed proxy
- "{{math ports.embed "+" 2}}:8883" # Gitea embed proxy
- "{{math ports.embed "+" 3}}:8884" # MailHog embed proxy
- "{{math ports.embed "+" 4}}:8885" # Mini QR embed proxy
- "{{math ports.embed "+" 5}}:8886" # Excalidraw embed proxy
- "{{math ports.embed "+" 6}}:8887" # Homepage embed proxy
- "{{math ports.embed "+" 7}}:8888" # Code Server embed proxy
- "{{math ports.embed "+" 8}}:8889" # MkDocs embed proxy
- "{{math ports.embed "+" 9}}:8890" # Vaultwarden embed proxy
- "{{math ports.embed "+" 10}}:8891" # Rocket.Chat embed proxy
- "{{math ports.embed "+" 11}}:8892" # Gancio embed proxy
- "{{math ports.embed "+" 12}}:8893" # Grafana embed proxy
- "{{math ports.embed "+" 13}}:8894" # Listmonk embed proxy
- "{{math ports.embed "+" 14}}:8895" # MkDocs site embed proxy
- "{{math ports.embed "+" 15}}:8896" # Jitsi Meet embed proxy
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1:80/"]
interval: 30s
timeout: 5s
retries: 3
# ─── Supporting Services ───────────────────────────────────
nocodb-v2:
image: nocodb/nocodb:latest
container_name: {{containerPrefix}}-nocodb
restart: unless-stopped
depends_on:
v2-postgres:
condition: service_healthy
environment:
NC_DB: "pg://{{containerPrefix}}-postgres:5432?u=changemaker&p={{secrets.postgresPassword}}&d=nocodb_meta"
NC_ADMIN_EMAIL: "{{secrets.adminEmail}}"
NC_ADMIN_PASSWORD: "{{secrets.nocodbAdminPassword}}"
volumes:
- {{containerPrefix}}-nocodb-data:/usr/app/data
networks:
- {{networkName}}
mailhog:
image: mailhog/mailhog:latest
container_name: {{containerPrefix}}-mailhog
restart: unless-stopped
networks:
- {{networkName}}
logging:
driver: "json-file"
options:
max-size: "5m"
max-file: "2"
mkdocs:
image: squidfunk/mkdocs-material:latest
container_name: {{containerPrefix}}-mkdocs
restart: unless-stopped
volumes:
- ./mkdocs:/docs:rw
- ./assets/images:/docs/assets/images:rw
user: "1000:1000"
environment:
SITE_URL: https://{{domain}}
ADMIN_PORT: "{{ports.admin}}"
ADMIN_URL: https://app.{{domain}}
BASE_DOMAIN: https://{{domain}}
API_URL: https://api.{{domain}}
API_PORT: "{{ports.api}}"
{{#if enableMedia}}
MEDIA_API_PUBLIC_URL: https://media.{{domain}}
MEDIA_API_PORT: "4100"
{{/if}}
{{#if enableGancio}}
GANCIO_URL: http://{{containerPrefix}}-gancio:13120
GANCIO_PORT: "8092"
{{/if}}
command: serve --dev-addr=0.0.0.0:8000 --watch-theme --livereload
networks:
- {{networkName}}
{{#if enableListmonk}}
listmonk-db:
image: postgres:17-alpine
container_name: {{containerPrefix}}-listmonk-db
restart: unless-stopped
environment:
POSTGRES_USER: listmonk
POSTGRES_PASSWORD: "{{secrets.listmonkAdminPassword}}"
POSTGRES_DB: listmonk
volumes:
- {{containerPrefix}}-listmonk-data:/var/lib/postgresql/data
networks:
- {{networkName}}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U listmonk"]
interval: 10s
timeout: 5s
retries: 6
listmonk-app:
image: listmonk/listmonk:latest
container_name: {{containerPrefix}}-listmonk
restart: unless-stopped
depends_on:
listmonk-db:
condition: service_healthy
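# Boot sequence: idempotent schema install, then upgrade, then the server
# itself; safe to repeat on every restart.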
command: [sh, -c, "./listmonk --install --idempotent --yes --config '' && ./listmonk --upgrade --yes --config '' && ./listmonk --config ''"]
environment:
LISTMONK_app__address: "0.0.0.0:9000"
LISTMONK_db__host: {{containerPrefix}}-listmonk-db
LISTMONK_db__port: "5432"
LISTMONK_db__user: listmonk
LISTMONK_db__password: "{{secrets.listmonkAdminPassword}}"
LISTMONK_db__database: listmonk
LISTMONK_db__ssl_mode: disable
TZ: Etc/UTC
LISTMONK_ADMIN_USER: admin
LISTMONK_ADMIN_PASSWORD: "{{secrets.listmonkAdminPassword}}"
volumes:
- ./assets/uploads:/listmonk/uploads:rw
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:9000/"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
listmonk-init:
image: postgres:17-alpine
container_name: {{containerPrefix}}-listmonk-init
depends_on:
listmonk-app:
condition: service_started
restart: "no"
environment:
PGPASSWORD: "{{secrets.listmonkAdminPassword}}"
LISTMONK_API_USER: v2-api
LISTMONK_API_TOKEN: "{{secrets.listmonkApiToken}}"
LISTMONK_SMTP_HOST: {{containerPrefix}}-mailhog
LISTMONK_SMTP_PORT: "1025"
entrypoint: ["/bin/sh", "-c"]
command:
- |
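# Note: "$$" throughout is Compose interpolation escaping; the shell sees "$".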
echo "[listmonk-init] Waiting for Listmonk tables..."
for i in $$(seq 1 30); do
if psql -h {{containerPrefix}}-listmonk-db -U listmonk -d listmonk -c "SELECT 1 FROM users LIMIT 1" >/dev/null 2>&1; then
break
fi
sleep 2
done
if [ -n "$$LISTMONK_API_TOKEN" ]; then
echo "[listmonk-init] Upserting API user '$$LISTMONK_API_USER'..."
psql -h {{containerPrefix}}-listmonk-db -U listmonk -d listmonk -q <<SQL
INSERT INTO users (username, password, password_login, email, name, type, user_role_id, status)
VALUES ('$$LISTMONK_API_USER', '$$LISTMONK_API_TOKEN', true, '$$LISTMONK_API_USER@api.internal', '$$LISTMONK_API_USER', 'api', 1, 'enabled')
ON CONFLICT (username) DO UPDATE SET password = EXCLUDED.password, status = 'enabled', user_role_id = 1;
SQL
echo "[listmonk-init] API user configured"
else
echo "[listmonk-init] LISTMONK_API_TOKEN not set, skipping API user"
fi
MAILHOG_ENTRY='{"host":"{{containerPrefix}}-mailhog","port":1025,"username":"","password":"","tls_type":"none","auth_protocol":"none","enabled":true,"max_conns":5,"idle_timeout":"15s","wait_timeout":"5s","max_msg_retries":2,"tls_skip_verify":false,"email_headers":[],"hello_hostname":""}'
SMTP_VALUE="[$$MAILHOG_ENTRY]"
psql -h {{containerPrefix}}-listmonk-db -U listmonk -d listmonk -q <<SQL
UPDATE settings SET value = '$$SMTP_VALUE' WHERE key = 'smtp';
SQL
echo "[listmonk-init] SMTP configured"
echo "[listmonk-init] Done"
networks:
- {{networkName}}
{{/if}}
{{#if enableGancio}}
# Gancio config writer — must complete before Gancio starts.
# Without config.json, Gancio enters first-time setup mode and crashes
# with "Non empty db!" if the database was previously initialized.
gancio-config:
image: alpine:latest
container_name: {{containerPrefix}}-gancio-config
depends_on:
v2-postgres:
condition: service_healthy
volumes:
- {{containerPrefix}}-gancio-data:/gancio-data
entrypoint: ["/bin/sh", "-c"]
command:
- |
if [ ! -f /gancio-data/config.json ]; then
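# The quoted heredoc ('EOF') blocks shell expansion here; Handlebars
# placeholders were already substituted when CCP rendered this file.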
cat > /gancio-data/config.json <<'EOF'
{
"baseurl": "https://events.{{domain}}",
"title": "Events",
"description": "Community Events",
"server": { "host": "0.0.0.0", "port": 13120 },
"db": {
"dialect": "postgres",
"host": "{{containerPrefix}}-postgres",
"port": 5432,
"database": "gancio",
"username": "changemaker",
"password": "{{secrets.postgresPassword}}"
}
}
EOF
echo "[gancio-config] config.json created"
else
echo "[gancio-config] config.json already exists, skipping"
fi
restart: "no"
networks:
- {{networkName}}
gancio:
image: cisti/gancio:latest
container_name: {{containerPrefix}}-gancio
restart: unless-stopped
depends_on:
v2-postgres:
condition: service_healthy
gancio-config:
condition: service_completed_successfully
environment:
GANCIO_DATA: /home/node/data
NODE_ENV: production
GANCIO_DB_DIALECT: postgres
GANCIO_DB_HOST: {{containerPrefix}}-postgres
GANCIO_DB_PORT: "5432"
GANCIO_DB_DATABASE: gancio
GANCIO_DB_USERNAME: changemaker
GANCIO_DB_PASSWORD: "{{secrets.postgresPassword}}"
server__baseurl: https://events.{{domain}}
volumes:
- {{containerPrefix}}-gancio-data:/home/node/data
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "node", "-e", "require('http').get('http://localhost:13120/', r => process.exit(r.statusCode < 400 ? 0 : 1)).on('error', () => process.exit(1))"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
# Gancio post-start seeder — seeds theme settings after Gancio creates its tables
gancio-init:
image: postgres:16-alpine
container_name: {{containerPrefix}}-gancio-init
depends_on:
gancio:
condition: service_healthy
environment:
PGHOST: {{containerPrefix}}-postgres
PGUSER: changemaker
PGPASSWORD: "{{secrets.postgresPassword}}"
PGDATABASE: gancio
entrypoint: ["/bin/sh", "-c"]
command:
- |
echo "[gancio-init] Seeding Gancio default theme settings..."
psql -c "INSERT INTO settings (key, value, is_secret, \"createdAt\", \"updatedAt\") VALUES
('dark_colors', '{\"primary\": \"#FF6E40\", \"error\": \"#FF5252\", \"info\": \"#2196F3\", \"success\": \"#4CAF50\", \"warning\": \"#FB8C00\"}', false, NOW(), NOW()),
('light_colors', '{\"primary\": \"#FF4500\", \"error\": \"#FF5252\", \"info\": \"#2196F3\", \"success\": \"#4CAF50\", \"warning\": \"#FB8C00\"}', false, NOW(), NOW())
ON CONFLICT (key) DO NOTHING;"
echo "[gancio-init] Theme settings seeded"
echo "[gancio-init] Done"
restart: "no"
networks:
- {{networkName}}
{{/if}}
{{#if enableChat}}
nats-rocketchat:
image: nats:2.11-alpine
container_name: {{containerPrefix}}-nats
restart: unless-stopped
command: --http_port 8222
networks:
- {{networkName}}
mongodb-rocketchat:
image: mongo:6.0
container_name: {{containerPrefix}}-mongodb
restart: unless-stopped
command: ["mongod", "--replSet", "rs0", "--bind_ip_all"]
volumes:
- {{containerPrefix}}-mongodb-data:/data/db
networks:
- {{networkName}}
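# The healthcheck doubles as replica-set bootstrap: rs.status() throws until
# rs0 exists, and the catch branch initiates it with this node as sole member.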
healthcheck:
test: ["CMD", "mongosh", "--quiet", "--eval", "try { rs.status().ok } catch(e) { rs.initiate({_id:'rs0',members:[{_id:0,host:'{{containerPrefix}}-mongodb:27017'}]}).ok }"]
interval: 10s
timeout: 10s
retries: 10
start_period: 30s
rocketchat:
image: rocketchat/rocket.chat:7.9.7
container_name: {{containerPrefix}}-rocketchat
restart: unless-stopped
depends_on:
mongodb-rocketchat:
condition: service_healthy
nats-rocketchat:
condition: service_started
environment:
ROOT_URL: https://chat.{{domain}}
MONGO_URL: mongodb://{{containerPrefix}}-mongodb:27017/rocketchat?replicaSet=rs0
MONGO_OPLOG_URL: mongodb://{{containerPrefix}}-mongodb:27017/local?replicaSet=rs0
TRANSPORTER: monolith+nats://{{containerPrefix}}-nats:4222
PORT: "3000"
ADMIN_USERNAME: rcadmin
ADMIN_NAME: Admin
ADMIN_EMAIL: "{{secrets.adminEmail}}"
ADMIN_PASS: "{{secrets.rocketchatAdminPassword}}"
CREATE_TOKENS_FOR_USERS: "true"
OVERWRITE_SETTING_Iframe_Integration_send_enable: "true"
OVERWRITE_SETTING_Iframe_Integration_receive_enable: "true"
OVERWRITE_SETTING_Iframe_Integration_receive_origin: http://app.{{domain}},https://app.{{domain}}
{{#if enableMeet}}
OVERWRITE_SETTING_Jitsi_Enabled: "true"
OVERWRITE_SETTING_Jitsi_Domain: meet.{{domain}}
OVERWRITE_SETTING_Jitsi_URL_Room_Prefix: RocketChat
OVERWRITE_SETTING_Jitsi_Enable_Channels: "true"
OVERWRITE_SETTING_Jitsi_Open_New_Window: "false"
OVERWRITE_SETTING_Jitsi_Enabled_TokenAuth: "true"
OVERWRITE_SETTING_Jitsi_Application_ID: changemaker
OVERWRITE_SETTING_Jitsi_Application_Secret: "{{secrets.jitsiAppSecret}}"
OVERWRITE_SETTING_VideoConf_Default_Provider: jitsi
{{/if}}
volumes:
- {{containerPrefix}}-rocketchat-uploads:/app/uploads
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1:3000/api/info"]
interval: 30s
timeout: 10s
retries: 10
start_period: 90s
{{/if}}
{{#if enablePangolin}}
# ─── Pangolin Tunnel ───────────────────────────────────────
newt:
image: fosrl/newt
container_name: {{containerPrefix}}-newt
restart: unless-stopped
depends_on:
- nginx
environment:
PANGOLIN_ENDPOINT: "{{pangolin.endpoint}}"
NEWT_ID: "{{pangolin.newtId}}"
NEWT_SECRET: "{{pangolin.newtSecret}}"
networks:
- {{networkName}}
{{/if}}
{{#if enableMeet}}
# ─── Jitsi Meet (Video Conferencing) ────────────────────
jitsi-web:
image: jitsi/web:stable-9823
container_name: {{containerPrefix}}-jitsi-web
restart: unless-stopped
depends_on:
- jitsi-prosody
environment:
XMPP_SERVER: {{containerPrefix}}-jitsi-prosody
XMPP_DOMAIN: meet.jitsi
XMPP_AUTH_DOMAIN: auth.meet.jitsi
XMPP_BOSH_URL_BASE: http://{{containerPrefix}}-jitsi-prosody:5280
XMPP_MUC_DOMAIN: muc.meet.jitsi
PUBLIC_URL: https://meet.{{domain}}
TZ: America/Edmonton
ENABLE_AUTH: "1"
AUTH_TYPE: jwt
JWT_APP_ID: changemaker
JWT_APP_SECRET: "{{secrets.jitsiAppSecret}}"
JWT_ACCEPTED_ISSUERS: changemaker
JWT_ACCEPTED_AUDIENCES: changemaker
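# These JWT values pair with the API's JITSI_APP_ID/SECRET and, when chat is
# enabled, Rocket.Chat's Jitsi_Application_ID/Secret overrides above.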
volumes:
- {{containerPrefix}}-jitsi-web-config:/config
networks:
- {{networkName}}
jitsi-prosody:
image: jitsi/prosody:stable-9823
container_name: {{containerPrefix}}-jitsi-prosody
restart: unless-stopped
environment:
XMPP_DOMAIN: meet.jitsi
XMPP_AUTH_DOMAIN: auth.meet.jitsi
XMPP_MUC_DOMAIN: muc.meet.jitsi
XMPP_INTERNAL_MUC_DOMAIN: internal-muc.meet.jitsi
XMPP_RECORDER_DOMAIN: recorder.meet.jitsi
XMPP_CROSS_DOMAIN: "true"
JICOFO_AUTH_USER: focus
JICOFO_AUTH_PASSWORD: "{{secrets.jitsiJicofoAuthPassword}}"
JVB_AUTH_USER: jvb
JVB_AUTH_PASSWORD: "{{secrets.jitsiJvbAuthPassword}}"
TZ: America/Edmonton
ENABLE_AUTH: "1"
AUTH_TYPE: jwt
JWT_APP_ID: changemaker
JWT_APP_SECRET: "{{secrets.jitsiAppSecret}}"
JWT_ACCEPTED_ISSUERS: changemaker
JWT_ACCEPTED_AUDIENCES: changemaker
JWT_ALLOW_EMPTY: "0"
volumes:
- {{containerPrefix}}-jitsi-prosody-config:/config
- {{containerPrefix}}-jitsi-prosody-plugins:/prosody-plugins-custom
networks:
- {{networkName}}
jitsi-jicofo:
image: jitsi/jicofo:stable-9823
container_name: {{containerPrefix}}-jitsi-jicofo
restart: unless-stopped
depends_on:
- jitsi-prosody
environment:
XMPP_SERVER: {{containerPrefix}}-jitsi-prosody
XMPP_DOMAIN: meet.jitsi
XMPP_AUTH_DOMAIN: auth.meet.jitsi
XMPP_INTERNAL_MUC_DOMAIN: internal-muc.meet.jitsi
XMPP_MUC_DOMAIN: muc.meet.jitsi
JICOFO_AUTH_USER: focus
JICOFO_AUTH_PASSWORD: "{{secrets.jitsiJicofoAuthPassword}}"
TZ: America/Edmonton
volumes:
- {{containerPrefix}}-jitsi-jicofo-config:/config
networks:
- {{networkName}}
jitsi-jvb:
image: jitsi/jvb:stable-9823
container_name: {{containerPrefix}}-jitsi-jvb
restart: unless-stopped
depends_on:
- jitsi-prosody
environment:
XMPP_SERVER: {{containerPrefix}}-jitsi-prosody
XMPP_DOMAIN: meet.jitsi
XMPP_AUTH_DOMAIN: auth.meet.jitsi
XMPP_INTERNAL_MUC_DOMAIN: internal-muc.meet.jitsi
JVB_AUTH_USER: jvb
JVB_AUTH_PASSWORD: "{{secrets.jitsiJvbAuthPassword}}"
JVB_STUN_SERVERS: meet-jit-si-turnrelay.jitsi.net:443
JVB_PORT: "10000"
JVB_ADVERTISE_IPS: "{{jvbAdvertiseIp}}"
TZ: America/Edmonton
ports:
- "10000:10000/udp"
volumes:
- {{containerPrefix}}-jitsi-jvb-config:/config
networks:
- {{networkName}}
{{/if}}
# ─── Always-On Utilities ──────────────────────────────────
mini-qr:
image: ghcr.io/lyqht/mini-qr:latest
container_name: {{containerPrefix}}-mini-qr
restart: unless-stopped
networks:
- {{networkName}}
mkdocs-site-server:
image: nginx:alpine
container_name: {{containerPrefix}}-mkdocs-site
restart: unless-stopped
volumes:
- ./mkdocs/site:/usr/share/nginx/html:ro
networks:
- {{networkName}}
{{#if enableDevTools}}
# ─── Dev Tools ────────────────────────────────────────────
code-server:
image: lscr.io/linuxserver/code-server:latest
container_name: {{containerPrefix}}-code-server
restart: unless-stopped
environment:
PASSWORD: "{{secrets.nocodbAdminPassword}}"
SUDO_PASSWORD: "{{secrets.nocodbAdminPassword}}"
volumes:
- .:/config/workspace:rw
networks:
- {{networkName}}
gitea:
image: gitea/gitea:latest
container_name: {{containerPrefix}}-gitea
restart: unless-stopped
depends_on:
v2-postgres:
condition: service_healthy
environment:
GITEA__database__DB_TYPE: postgres
GITEA__database__HOST: {{containerPrefix}}-postgres:5432
GITEA__database__NAME: gitea
GITEA__database__USER: changemaker
GITEA__database__PASSWD: "{{secrets.postgresPassword}}"
GITEA__server__ROOT_URL: https://git.{{domain}}
GITEA__server__DOMAIN: git.{{domain}}
GITEA__server__HTTP_PORT: "3000"
GITEA__server__PROTOCOL: http
GITEA__server__ENABLE_GZIP: "true"
GITEA__server__X_FRAME_OPTIONS: ""
GITEA__security__INSTALL_LOCK: "true"
GITEA__attachment__MAX_SIZE: "1024"
GITEA__repository__MAX_CREATION_LIMIT: "-1"
GITEA__server__LFS_START_SERVER: "true"
volumes:
- {{containerPrefix}}-gitea-data:/data
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "curl", "-fsSL", "http://localhost:3000/api/healthz"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
# Gitea init — creates admin user after Gitea is healthy (idempotent)
# Must run as git user (UID 1000) — Gitea refuses to run as root
gitea-init:
image: gitea/gitea:latest
container_name: {{containerPrefix}}-gitea-init
user: "1000:1000"
depends_on:
gitea:
condition: service_healthy
environment:
GITEA__database__DB_TYPE: postgres
GITEA__database__HOST: {{containerPrefix}}-postgres:5432
GITEA__database__NAME: gitea
GITEA__database__USER: changemaker
GITEA__database__PASSWD: "{{secrets.postgresPassword}}"
GITEA__security__INSTALL_LOCK: "true"
volumes:
- {{containerPrefix}}-gitea-data:/data
entrypoint: ["/bin/sh", "-c"]
command:
- |
echo "[gitea-init] Running migrations (idempotent)..."
gitea migrate 2>/dev/null || true
if gitea admin user list --admin 2>/dev/null | grep -q "admin"; then
echo "[gitea-init] Admin user already exists, skipping"
else
echo "[gitea-init] Creating admin user..."
gitea admin user create \
--admin \
--username admin \
--password "{{secrets.giteaAdminPassword}}" \
--email "{{secrets.adminEmail}}" \
--must-change-password=false
echo "[gitea-init] Admin user created"
fi
echo "[gitea-init] Done"
restart: "no"
networks:
- {{networkName}}
n8n:
image: n8nio/n8n:latest
container_name: {{containerPrefix}}-n8n
restart: unless-stopped
environment:
N8N_ENCRYPTION_KEY: "{{secrets.n8nEncryptionKey}}"
WEBHOOK_URL: https://n8n.{{domain}}
N8N_HOST: n8n.{{domain}}
N8N_PROTOCOL: https
N8N_SECURE_COOKIE: "false"
volumes:
- {{containerPrefix}}-n8n-data:/home/node/.n8n
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:5678/healthz"]
interval: 30s
timeout: 5s
retries: 3
docker-socket-proxy:
image: ghcr.io/tecnativa/docker-socket-proxy:latest
container_name: {{containerPrefix}}-docker-socket-proxy
restart: unless-stopped
environment:
CONTAINERS: "1"
IMAGES: "1"
INFO: "1"
NETWORKS: "0"
VOLUMES: "0"
POST: "0"
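# Minimal read-only surface for Homepage: container, image, and info reads
# are allowed; POST (mutations) and network/volume enumeration are denied.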
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
networks:
- {{networkName}}
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: {{containerPrefix}}-homepage
restart: unless-stopped
environment:
DOCKER_HOST: tcp://{{containerPrefix}}-docker-socket-proxy:2375
volumes:
- {{containerPrefix}}-homepage-data:/app/config
depends_on:
- docker-socket-proxy
networks:
- {{networkName}}
excalidraw:
image: excalidraw/excalidraw:latest
container_name: {{containerPrefix}}-excalidraw
restart: unless-stopped
networks:
- {{networkName}}
# Vaultwarden — Password manager (Bitwarden-compatible)
vaultwarden:
image: vaultwarden/server:latest
container_name: {{containerPrefix}}-vaultwarden
restart: unless-stopped
environment:
ADMIN_TOKEN: "{{secrets.vaultwardenAdminToken}}"
DOMAIN: https://vault.{{domain}}
SIGNUPS_ALLOWED: "false"
WEBSOCKET_ENABLED: "true"
ROCKET_PORT: "80"
LOG_LEVEL: info
SMTP_HOST: {{containerPrefix}}-mailhog
SMTP_PORT: "1025"
SMTP_FROM: "noreply@{{domain}}"
SMTP_FROM_NAME: Vaultwarden
SMTP_SECURITY: "off"
SMTP_USERNAME: ""
SMTP_PASSWORD: ""
volumes:
- {{containerPrefix}}-vaultwarden-data:/data
networks:
- {{networkName}}
healthcheck:
test: ["CMD", "curl", "-sf", "http://localhost:80/alive"]
interval: 30s
timeout: 5s
retries: 3
start_period: 15s
# Vaultwarden init — invites the initial admin user (safe to re-run)
vaultwarden-init:
image: alpine/curl:latest
container_name: {{containerPrefix}}-vaultwarden-init
depends_on:
vaultwarden:
condition: service_healthy
restart: "no"
entrypoint: ["/bin/sh", "-c"]
command:
- |
echo "[vaultwarden-init] Waiting for Vaultwarden..."
for i in $$(seq 1 20); do
if curl -sf http://{{containerPrefix}}-vaultwarden:80/alive >/dev/null 2>&1; then
break
fi
sleep 2
done
VAULTWARDEN_ADMIN_TOKEN="{{secrets.vaultwardenAdminToken}}"
VAULTWARDEN_URL="http://{{containerPrefix}}-vaultwarden:80"
INVITE_EMAIL="{{secrets.adminEmail}}"
if [ -z "$$VAULTWARDEN_ADMIN_TOKEN" ]; then
echo "[vaultwarden-init] VAULTWARDEN_ADMIN_TOKEN not set, skipping invite"
exit 0
fi
echo "[vaultwarden-init] Authenticating with admin panel..."
SESSION_COOKIE=$$(mktemp)
HTTP_CODE=$$(curl -s -o /dev/null -w "%{http_code}" \
-c "$$SESSION_COOKIE" \
-X POST "$$VAULTWARDEN_URL/admin" \
-H "Content-Type: application/x-www-form-urlencoded" \
-d "token=$$VAULTWARDEN_ADMIN_TOKEN")
if [ "$$HTTP_CODE" != "200" ] && [ "$$HTTP_CODE" != "302" ]; then
echo "[vaultwarden-init] Admin auth failed (HTTP $$HTTP_CODE)"
rm -f "$$SESSION_COOKIE"
exit 1
fi
echo "[vaultwarden-init] Authenticated"
echo "[vaultwarden-init] Inviting $$INVITE_EMAIL..."
INVITE_CODE=$$(curl -s -w "\n%{http_code}" \
-b "$$SESSION_COOKIE" \
-X POST "$$VAULTWARDEN_URL/admin/invite" \
-H "Content-Type: application/json" \
-d "{\"email\":\"$$INVITE_EMAIL\"}")
INVITE_HTTP=$$(echo "$$INVITE_CODE" | tail -1)
INVITE_BODY=$$(echo "$$INVITE_CODE" | sed '$$d')  # portable: busybox head may lack "-n -1"
if [ "$$INVITE_HTTP" = "200" ] || [ "$$INVITE_HTTP" = "422" ]; then
echo "[vaultwarden-init] Invite sent (or user already exists)"
else
echo "[vaultwarden-init] Invite failed (HTTP $$INVITE_HTTP): $$INVITE_BODY"
fi
rm -f "$$SESSION_COOKIE"
echo "[vaultwarden-init] Done"
networks:
- {{networkName}}
{{/if}}
{{#if enableMonitoring}}
# ─── Monitoring Stack ──────────────────────────────────────
prometheus:
image: prom/prometheus:latest
container_name: {{containerPrefix}}-prometheus
restart: unless-stopped
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=30d'
volumes:
- ./configs/prometheus:/etc/prometheus:ro
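# ./configs/prometheus must contain prometheus.yml plus the alerts.yml its
# rule_files references (both copied into templates by this commit).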
- {{containerPrefix}}-prometheus-data:/prometheus
networks:
- {{networkName}}
grafana:
image: grafana/grafana:latest
container_name: {{containerPrefix}}-grafana
restart: unless-stopped
environment:
GF_SECURITY_ADMIN_PASSWORD: "{{secrets.grafanaAdminPassword}}"
GF_USERS_ALLOW_SIGN_UP: "false"
GF_SERVER_ROOT_URL: https://grafana.{{domain}}
GF_SECURITY_ALLOW_EMBEDDING: "true"
volumes:
- {{containerPrefix}}-grafana-data:/var/lib/grafana
- ./configs/grafana:/etc/grafana/provisioning:ro
depends_on:
- prometheus
networks:
- {{networkName}}
alertmanager:
image: prom/alertmanager:latest
container_name: {{containerPrefix}}-alertmanager
restart: unless-stopped
command:
- '--config.file=/etc/alertmanager/alertmanager.yml'
- '--storage.path=/alertmanager'
volumes:
- ./configs/alertmanager:/etc/alertmanager:ro
- {{containerPrefix}}-alertmanager-data:/alertmanager
networks:
- {{networkName}}
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
container_name: {{containerPrefix}}-cadvisor
restart: unless-stopped
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
networks:
- {{networkName}}
node-exporter:
image: prom/node-exporter:latest
container_name: {{containerPrefix}}-node-exporter
restart: unless-stopped
command:
- '--path.rootfs=/host'
volumes:
- /:/host:ro,rslave
networks:
- {{networkName}}
redis-exporter:
image: oliver006/redis_exporter:latest
container_name: {{containerPrefix}}-redis-exporter
restart: unless-stopped
environment:
REDIS_ADDR: redis://{{containerPrefix}}-redis:6379
REDIS_PASSWORD: "{{secrets.redisPassword}}"
depends_on:
- redis
networks:
- {{networkName}}
gotify:
image: gotify/server:latest
container_name: {{containerPrefix}}-gotify
restart: unless-stopped
volumes:
- {{containerPrefix}}-gotify-data:/app/data
networks:
- {{networkName}}
{{/if}}
# ─── Volumes ──────────────────────────────────────────────
volumes:
{{containerPrefix}}-postgres-data:
{{containerPrefix}}-redis-data:
{{containerPrefix}}-nocodb-data:
{{#if enableListmonk}}
{{containerPrefix}}-listmonk-data:
{{/if}}
{{#if enableGancio}}
{{containerPrefix}}-gancio-data:
{{/if}}
{{#if enableChat}}
{{containerPrefix}}-mongodb-data:
{{containerPrefix}}-rocketchat-uploads:
{{/if}}
{{#if enableDevTools}}
{{containerPrefix}}-gitea-data:
{{containerPrefix}}-n8n-data:
{{containerPrefix}}-homepage-data:
{{containerPrefix}}-vaultwarden-data:
{{/if}}
{{#if enableMonitoring}}
{{containerPrefix}}-prometheus-data:
{{containerPrefix}}-grafana-data:
{{containerPrefix}}-alertmanager-data:
{{containerPrefix}}-gotify-data:
{{/if}}
{{#if enableMeet}}
{{containerPrefix}}-jitsi-web-config:
{{containerPrefix}}-jitsi-prosody-config:
{{containerPrefix}}-jitsi-prosody-plugins:
{{containerPrefix}}-jitsi-jicofo-config:
{{containerPrefix}}-jitsi-jvb-config:
{{/if}}
# ─── Networks ─────────────────────────────────────────────
networks:
{{networkName}}:
driver: bridge