install: preflight + teardown tooling + CCP tunnel cleanup on delete

Fixes surfaced by three rounds of fresh-install testing on marcelle:

- config.sh: add host-port preflight check (ss -tln) to catch
  cockpit-on-9090 style collisions before compose up; add
  --skip-port-check escape hatch; add --install-watcher /
  --no-install-watcher / --install-backup-timer /
  --no-install-backup-timer flags; -y --enable-all now installs both
  systemd units by default (previously silently skipped); print
  resolved admin email in the Configuration Complete block (see the
  example flow after this list).

- scripts/validate-env.sh: new section 5b "Host Port Availability"
  using ss-based detection, with process-name surfacing when run as
  root.

- scripts/pangolin-teardown.sh: new wrapper. Reads credentials from
  .env or takes --api-url/--api-key/--org-id flags. Dry-run by
  default; --yes to execute. Deletes resources before sites (avoids
  orphans). --keep-site-ids for safety.

- scripts/build-release.sh: include validate-env.sh and
  pangolin-teardown.sh in release tarball whitelist.

- CCP instances.service.ts: deleteInstance() now calls
  teardownTunnel() before composeDown when pangolinSiteId is set.
  Previously an admin clicking "Delete Instance" orphaned the
  Pangolin site + all its resources. Best-effort with try/catch
  matching the existing Docker-cleanup tolerance pattern.

- CLAUDE.md: sync drift — 44 → 50 migrations, 186 → 192 models,
  40 → 44 modules.
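
For reference, a typical fresh-install + wipe cycle exercising the new
pieces looks roughly like this (values illustrative):

  ./config.sh -y --enable-all --no-install-watcher \
    --admin-email admin@example.org \
    --admin-password 'ChangeMe12345'   # add --skip-port-check only for a known-benign port conflict
  ./scripts/validate-env.sh            # section 5b lists host port conflicts (process names shown when run as root)
  ./scripts/pangolin-teardown.sh       # dry-run: preview the org's resources + sites
  ./scripts/pangolin-teardown.sh --yes # actually delete (--keep-site-ids ID,ID spares specific sites)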

Bunker Admin
bunker-admin 2026-04-16 12:50:48 -06:00
parent 13513aeca5
commit f9d566bd84
6 changed files with 315 additions and 22 deletions

CLAUDE.md

@@ -63,8 +63,8 @@ Changemaker Lite is a self-hosted political campaign platform built with Docker
changemaker.lite/
├── api/ # Dual API servers (Express + Fastify)
│ ├── prisma/
│ │ ├── schema.prisma # 186 models: User, Campaign, Location, Shift, Payment, Social, etc.
│ │ ├── migrations/ # 44 Prisma migrations (full schema history)
│ │ ├── schema.prisma # 192 models: User, Campaign, Location, Shift, Payment, Social, etc.
│ │ ├── migrations/ # 50 Prisma migrations (full schema history)
│ │ └── seed.ts # Admin user, settings, page blocks
│ ├── Dockerfile.media # Fastify media server container
│ └── src/
@@ -73,7 +73,7 @@ changemaker.lite/
│ ├── config/
│ │ └── env.ts # Zod-validated environment config (100+ vars)
│ ├── middleware/ # auth, rbac, rate-limit, validate, error-handler
│ ├── modules/ # 40 modules total
│ ├── modules/ # 44 modules total
│ │ ├── auth/ # JWT login, register, refresh, logout
│ │ ├── users/ # User CRUD + pagination + search
│ │ ├── settings/ # Site settings singleton (20+ feature flags)
@@ -631,11 +631,11 @@ cd api && npx tsc --noEmit && cd ../admin && npx tsc --noEmit
- API clients: `{ api }` from `lib/api.ts`, `mediaApi` from `lib/media-api.ts`
### Database ORM
- **Prisma** (both APIs): 186 models in single `schema.prisma`. Use `UncheckedCreateInput`/`UncheckedUpdateInput` for foreign keys, `Prisma.InputJsonValue` for JSON arrays
- **Prisma** (both APIs): 192 models in single `schema.prisma`. Use `UncheckedCreateInput`/`UncheckedUpdateInput` for foreign keys, `Prisma.InputJsonValue` for JSON arrays
### Prisma Migration Workflow
- **Always use `prisma migrate dev`** for schema changes (not `prisma db push`) — `db push` applies changes directly but doesn't create migration files, causing drift
- **Migration history:** 44 migrations in `api/prisma/migrations/` fully cover the schema
- **Migration history:** 50 migrations in `api/prisma/migrations/` fully cover the schema
- **Production deploys:** Use `prisma migrate deploy` (not `migrate dev`)
### Key Gotchas
@@ -772,8 +772,8 @@ V1 code has been removed from the repo. History preserved as `v1-archive` git ta
- `config.sh` — Interactive setup wizard (14 steps, release-mode aware)
### Database
- `api/prisma/schema.prisma` — Main schema (186 Prisma models)
- `api/prisma/migrations/` — 44 migration files (full schema history)
- `api/prisma/schema.prisma` — Main schema (192 Prisma models)
- `api/prisma/migrations/` — 50 migration files (full schema history)
- `api/prisma/seed.ts` — Database seeding
### Nginx

instances.service.ts (CCP)

@@ -12,6 +12,7 @@ import { getDriverForInstance, AgentUnreachableError } from '../../services/exec
import { provision } from './provisioner';
import { CreateInstanceInput, UpdateInstanceInput, RegisterInstanceInput, ReconfigureInstanceInput, ConfigureTunnelInput } from './instances.schemas';
import { buildTemplateContext, renderAllTemplates, clearTemplateCache } from '../../services/template-engine';
import { teardownTunnel } from '../../services/tunnel.service';
import { logger } from '../../utils/logger';
import path from 'path';
@@ -283,6 +283,18 @@ export async function deleteInstance(id: string, userId: string, ipAddress?: str
data: { status: InstanceStatus.DESTROYING, statusMessage: 'Shutting down containers...' },
});
// Tear down the Pangolin site + resources first: if teardown ran after composeDown
// and we crashed in between, the Pangolin entities would leak for the lifetime of the org.
// Best effort — matches the Docker-cleanup tolerance below.
if (instance.pangolinSiteId) {
try {
await teardownTunnel(id, userId, ipAddress ?? null);
logger.info(`[instances] ${instance.slug}: Pangolin tunnel torn down`);
} catch (err) {
logger.warn(`[instances] ${instance.slug}: Pangolin teardown warning: ${(err as Error).message}`);
}
}
// Stop containers and remove volumes
try {
const driver = await getDriverForInstance(instance);

config.sh

@@ -19,6 +19,12 @@ NI_ADMIN_EMAIL=""
NI_ADMIN_PASSWORD=""
NI_PRODUCTION=true
NI_ENABLE_ALL=false
SKIP_PORT_CHECK=false
# Systemd unit install opt-in ("", "yes", "no")
# Empty means "use default for this mode": skipped in NI unless --enable-all.
NI_INSTALL_WATCHER=""
NI_INSTALL_BACKUP=""
# SMTP flags
NI_SMTP_HOST=""
@@ -52,6 +58,11 @@ while [[ $# -gt 0 ]]; do
--admin-password) NI_ADMIN_PASSWORD="$2"; shift 2 ;;
--development) NI_PRODUCTION=false; shift ;;
--enable-all) NI_ENABLE_ALL=true; shift ;;
--skip-port-check) SKIP_PORT_CHECK=true; shift ;;
--install-watcher) NI_INSTALL_WATCHER="yes"; shift ;;
--no-install-watcher) NI_INSTALL_WATCHER="no"; shift ;;
--install-backup-timer) NI_INSTALL_BACKUP="yes"; shift ;;
--no-install-backup-timer) NI_INSTALL_BACKUP="no"; shift ;;
# SMTP
--smtp-host) NI_SMTP_HOST="$2"; shift 2 ;;
--smtp-port) NI_SMTP_PORT="$2"; shift 2 ;;
@@ -80,7 +91,14 @@ while [[ $# -gt 0 ]]; do
echo " --admin-email EMAIL Set admin email (default: admin@DOMAIN)"
echo " --admin-password PASS Set admin password (must meet policy: 12+ chars, upper+lower+digit)"
echo " --development Set NODE_ENV=development (default: production)"
echo " --enable-all Enable all optional features"
echo " --enable-all Enable all optional features + install systemd units"
echo " --skip-port-check Skip host port availability check (not recommended)"
echo ""
echo "Systemd Units (default in -y mode: skipped, unless --enable-all):"
echo " --install-watcher Install upgrade watcher systemd unit"
echo " --no-install-watcher Skip upgrade watcher even with --enable-all"
echo " --install-backup-timer Install daily backup timer systemd unit"
echo " --no-install-backup-timer Skip backup timer even with --enable-all"
echo ""
echo "SMTP:"
echo " --smtp-host HOST SMTP server hostname"
@@ -263,6 +281,32 @@ check_prerequisites() {
ok=false
fi
# Host port availability check — catches cockpit on :9090 and other stray listeners.
# Interactive runs only warn (validate-env.sh surfaces details later); non-interactive
# runs fail unless --skip-port-check is passed.
if command -v ss &>/dev/null; then
local host_conflicts=""
for port in 3000 4000 4100 5433 3001 3030 9090 8091 8025 9001 5678 8888; do
if ss -Htln 2>/dev/null | awk -v p=":$port" '$4 ~ p"$" {found=1} END{exit !found}'; then
host_conflicts+="$port "
fi
done
if [[ -n "$host_conflicts" ]]; then
warn "Host ports already in use: $host_conflicts"
warn "This will break 'docker compose up -d' on affected services."
warn "Common: cockpit.socket owns :9090 — 'sudo systemctl disable --now cockpit.socket'"
warn "Run './scripts/validate-env.sh' after setup for a full report."
if [[ "$NON_INTERACTIVE" == "true" ]]; then
error "Refusing to continue in non-interactive mode with host port conflicts."
error "Free the ports or pass --skip-port-check to override."
[[ "$SKIP_PORT_CHECK" != "true" ]] && ok=false
fi
else
success "Host ports available"
fi
else
info "ss not installed — skipping host port check"
fi
$ok || { echo ""; error "Missing prerequisites. Install them and re-run."; exit 1; }
}
@@ -421,6 +465,7 @@ configure_admin() {
update_env_var "INITIAL_ADMIN_EMAIL" "$admin_email"
update_env_var "INITIAL_ADMIN_PASSWORD" "$admin_password"
update_env_var "N8N_USER_EMAIL" "$admin_email"
CONFIGURED_ADMIN_EMAIL="$admin_email"
success "Admin credentials configured ($admin_email)"
else
local default_email="admin@${CONFIGURED_DOMAIN:-cmlite.org}"
@@ -449,6 +494,7 @@ configure_admin() {
update_env_var "INITIAL_ADMIN_EMAIL" "$admin_email"
update_env_var "INITIAL_ADMIN_PASSWORD" "$admin_password"
update_env_var "N8N_USER_EMAIL" "$admin_email"
CONFIGURED_ADMIN_EMAIL="$admin_email"
success "Admin credentials configured"
fi
}
@@ -1978,20 +2024,32 @@ fix_container_permissions() {
install_upgrade_watcher() {
header "System Upgrade Watcher"
# Ensure upgrade IPC directory exists regardless of install decision
mkdir -p "$SCRIPT_DIR/data/upgrade"
# Resolve whether to install in non-interactive mode:
# --install-watcher => yes
# --no-install-watcher => no
# --enable-all (no override) => yes (new default)
# otherwise => no (preserve legacy behaviour)
local should_install="ask"
if [[ "$NON_INTERACTIVE" == "true" ]]; then
mkdir -p "$SCRIPT_DIR/data/upgrade"
info "Skipping systemd watcher install (run manually later)"
UPGRADE_WATCHER="skipped"
return
case "$NI_INSTALL_WATCHER" in
yes) should_install="yes" ;;
no) should_install="no" ;;
"") if [[ "$NI_ENABLE_ALL" == "true" ]]; then should_install="yes"; else should_install="no"; fi ;;
esac
if [[ "$should_install" == "no" ]]; then
info "Skipping systemd watcher install (pass --install-watcher or --enable-all to install)"
UPGRADE_WATCHER="skipped"
return
fi
fi
info "The upgrade watcher lets you trigger upgrades from the admin Settings page."
info "It installs a systemd path watcher that monitors for trigger files."
echo ""
# Ensure upgrade IPC directory exists
mkdir -p "$SCRIPT_DIR/data/upgrade"
local unit_src="$SCRIPT_DIR/scripts/systemd"
if [[ ! -f "$unit_src/changemaker-upgrade.path" ]] || [[ ! -f "$unit_src/changemaker-upgrade.service" ]]; then
warn "Systemd unit templates not found in scripts/systemd/ — skipping"
@@ -2005,7 +2063,7 @@ install_upgrade_watcher() {
return
fi
if prompt_yes_no "Install the upgrade watcher (requires sudo)?"; then
if [[ "$should_install" == "yes" ]] || prompt_yes_no "Install the upgrade watcher (requires sudo)?"; then
# Generate units with correct paths substituted
local tmp_path tmp_service
tmp_path=$(mktemp)
@@ -2041,10 +2099,18 @@ install_upgrade_watcher() {
install_backup_timer() {
header "Automated Backups"
local should_install="ask"
if [[ "$NON_INTERACTIVE" == "true" ]]; then
info "Skipping backup timer install (run manually later)"
BACKUP_TIMER="skipped"
return
case "$NI_INSTALL_BACKUP" in
yes) should_install="yes" ;;
no) should_install="no" ;;
"") if [[ "$NI_ENABLE_ALL" == "true" ]]; then should_install="yes"; else should_install="no"; fi ;;
esac
if [[ "$should_install" == "no" ]]; then
info "Skipping backup timer install (pass --install-backup-timer or --enable-all to install)"
BACKUP_TIMER="skipped"
return
fi
fi
info "Daily automated backups protect against data loss."
@@ -2064,7 +2130,7 @@ install_backup_timer() {
return
fi
if prompt_yes_no "Install daily automated backups (requires sudo)?" "y"; then
if [[ "$should_install" == "yes" ]] || prompt_yes_no "Install daily automated backups (requires sudo)?" "y"; then
local tmp_timer tmp_service
tmp_timer=$(mktemp)
tmp_service=$(mktemp)
@@ -2101,7 +2167,7 @@ print_summary() {
header "Configuration Complete"
echo -e " ${BOLD}Domain:${NC} ${CONFIGURED_DOMAIN:-cmlite.org}"
echo -e " ${BOLD}Admin email:${NC} (see .env: INITIAL_ADMIN_EMAIL)"
echo -e " ${BOLD}Admin email:${NC} ${CONFIGURED_ADMIN_EMAIL:-admin@${CONFIGURED_DOMAIN:-cmlite.org}}"
echo -e " ${BOLD}Admin password:${NC} [set]"
echo -e " ${BOLD}SMTP:${NC} ${SMTP_MODE:-mailhog}"
echo -e " ${BOLD}Media Manager:${NC} ${MEDIA_ENABLED:-no}"

scripts/build-release.sh

@@ -117,7 +117,8 @@ cp "$PROJECT_DIR/api/prisma/init-gancio-db.sh" "$STAGE_DIR/scripts/"
for script in nocodb-init.sh gitea-init.sh mkdocs-entrypoint.sh \
backup.sh restore.sh \
upgrade.sh upgrade-check.sh upgrade-watcher.sh \
uninstall.sh test-deployment.sh; do
uninstall.sh test-deployment.sh \
validate-env.sh pangolin-teardown.sh; do
if [[ -f "$PROJECT_DIR/scripts/$script" ]]; then
cp "$PROJECT_DIR/scripts/$script" "$STAGE_DIR/scripts/"
fi

scripts/pangolin-teardown.sh (new executable file, 172 lines)

@@ -0,0 +1,172 @@
#!/bin/bash
# =============================================================================
# pangolin-teardown.sh — Delete all Pangolin resources + sites for an org
#
# Use when wiping a test environment before a fresh install. Idempotent.
#
# Credentials are read from .env (PANGOLIN_API_URL/PANGOLIN_API_KEY/PANGOLIN_ORG_ID)
# unless overridden by flags.
#
# Usage:
# ./scripts/pangolin-teardown.sh # dry-run (preview only)
# ./scripts/pangolin-teardown.sh --yes # actually delete
# ./scripts/pangolin-teardown.sh --yes \
# --api-url https://api.bnkserve.org/v1 \
# --api-key KEY --org-id cursed-knowledge
#
# Exit codes: 0 success, 1 error, 2 partial failure
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
ENV_FILE="${PROJECT_DIR}/.env"
RED='\033[0;31m'; YELLOW='\033[1;33m'; GREEN='\033[0;32m'; CYAN='\033[0;36m'; BOLD='\033[1m'; NC='\033[0m'
API_URL=""; API_KEY=""; ORG_ID=""
CONFIRM=false
KEEP_SITE_IDS=""
usage() {
sed -n '2,20p' "$0" | sed 's/^# \?//'
exit 0
}
while [[ $# -gt 0 ]]; do
case "$1" in
--yes|-y) CONFIRM=true; shift ;;
--api-url) API_URL="$2"; shift 2 ;;
--api-key) API_KEY="$2"; shift 2 ;;
--org-id) ORG_ID="$2"; shift 2 ;;
--keep-site-ids) KEEP_SITE_IDS="$2"; shift 2 ;;
-h|--help) usage ;;
*) echo "Unknown flag: $1"; exit 1 ;;
esac
done
# Load from .env if any field is missing
if [[ -z "$API_URL" || -z "$API_KEY" || -z "$ORG_ID" ]]; then
if [[ -f "$ENV_FILE" ]]; then
API_URL="${API_URL:-$(grep -E '^PANGOLIN_API_URL=' "$ENV_FILE" | head -1 | cut -d= -f2-)}"
API_KEY="${API_KEY:-$(grep -E '^PANGOLIN_API_KEY=' "$ENV_FILE" | head -1 | cut -d= -f2-)}"
ORG_ID="${ORG_ID:-$(grep -E '^PANGOLIN_ORG_ID=' "$ENV_FILE" | head -1 | cut -d= -f2-)}"
fi
fi
if [[ -z "$API_URL" || -z "$API_KEY" || -z "$ORG_ID" ]]; then
echo -e "${RED}ERROR:${NC} Missing Pangolin credentials."
echo " Provide via flags (--api-url/--api-key/--org-id) or set in .env:"
echo " PANGOLIN_API_URL, PANGOLIN_API_KEY, PANGOLIN_ORG_ID"
exit 1
fi
if ! command -v python3 >/dev/null 2>&1; then
echo -e "${RED}ERROR:${NC} python3 is required for JSON parsing"
exit 1
fi
# Normalize keep list into a space-separated lookup string
KEEP_LIST=" ${KEEP_SITE_IDS//,/ } "
echo -e "${BOLD}Pangolin teardown${NC}"
echo " API: $API_URL"
echo " Org: $ORG_ID"
[[ -n "$KEEP_SITE_IDS" ]] && echo " Keep sites: $KEEP_SITE_IDS"
[[ "$CONFIRM" == "false" ]] && echo -e " ${YELLOW}Mode: DRY RUN${NC} (pass --yes to execute)"
echo ""
# --- List resources ---
RES_JSON=$(curl -sf -H "Authorization: Bearer $API_KEY" "$API_URL/org/$ORG_ID/resources" || echo '')
if [[ -z "$RES_JSON" ]]; then
echo -e "${RED}ERROR:${NC} Failed to list resources (check API key + org id)"
exit 1
fi
RESOURCES=$(echo "$RES_JSON" | python3 -c "
import sys, json
d = json.load(sys.stdin)
for r in d.get('data', {}).get('resources', []):
print(f\"{r['resourceId']}\t{r.get('name','?')}\t{r.get('fullDomain','')}\")
")
RESOURCE_COUNT=$(echo -n "$RESOURCES" | grep -c . || true)
echo -e "${CYAN}Resources to delete: $RESOURCE_COUNT${NC}"
[[ -n "$RESOURCES" ]] && echo "$RESOURCES" | awk -F'\t' '{printf " - [%s] %s %s\n", $1, $2, $3}'
# --- List sites ---
SITES_JSON=$(curl -sf -H "Authorization: Bearer $API_KEY" "$API_URL/org/$ORG_ID/sites" || echo '')
SITES=$(echo "$SITES_JSON" | python3 -c "
import sys, json
d = json.load(sys.stdin)
for s in d.get('data', {}).get('sites', []):
print(f\"{s['siteId']}\t{s.get('name','?')}\t{s.get('online','?')}\")
")
# Filter out kept sites
FILTERED_SITES=""
if [[ -n "$SITES" ]]; then
while IFS=$'\t' read -r sid name online; do
if [[ "$KEEP_LIST" == *" $sid "* ]]; then
continue
fi
FILTERED_SITES+="${sid}\t${name}\t${online}"$'\n'
done <<< "$SITES"
fi
SITE_COUNT=$(echo -n "$FILTERED_SITES" | grep -c . || true)
echo ""
echo -e "${CYAN}Sites to delete: $SITE_COUNT${NC}"
[[ -n "$FILTERED_SITES" ]] && echo -e "$FILTERED_SITES" | awk -F'\t' 'NF>1 {printf " - [%s] %s online=%s\n", $1, $2, $3}'
if [[ "$CONFIRM" == "false" ]]; then
echo ""
echo -e "${YELLOW}Dry run complete.${NC} Re-run with --yes to actually delete."
exit 0
fi
echo ""
echo -e "${BOLD}Deleting...${NC}"
FAILURES=0
# Delete resources
if [[ -n "$RESOURCES" ]]; then
while IFS=$'\t' read -r rid name domain; do
[[ -z "$rid" ]] && continue
code=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE "$API_URL/resource/$rid" -H "Authorization: Bearer $API_KEY")
if [[ "$code" == "200" || "$code" == "204" ]]; then
echo -e " ${GREEN}OK${NC} resource $rid ($name) deleted"
else
echo -e " ${RED}FAIL${NC} resource $rid ($name) HTTP $code"
FAILURES=$((FAILURES + 1))
fi
done <<< "$RESOURCES"
fi
# Delete sites (after resources, since resources reference sites)
if [[ -n "$FILTERED_SITES" ]]; then
while IFS=$'\t' read -r sid name online; do
[[ -z "$sid" ]] && continue
code=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE "$API_URL/site/$sid" -H "Authorization: Bearer $API_KEY")
if [[ "$code" == "200" || "$code" == "204" ]]; then
echo -e " ${GREEN}OK${NC} site $sid ($name) deleted"
else
echo -e " ${RED}FAIL${NC} site $sid ($name) HTTP $code"
FAILURES=$((FAILURES + 1))
fi
done <<< "$(echo -e "$FILTERED_SITES")"
fi
echo ""
# Verify empty state
REMAINING_RES=$(curl -sf -H "Authorization: Bearer $API_KEY" "$API_URL/org/$ORG_ID/resources" | python3 -c "import sys,json; print(len(json.load(sys.stdin).get('data',{}).get('resources',[])))" 2>/dev/null || echo '?')
REMAINING_SITES=$(curl -sf -H "Authorization: Bearer $API_KEY" "$API_URL/org/$ORG_ID/sites" | python3 -c "import sys,json; print(len(json.load(sys.stdin).get('data',{}).get('sites',[])))" 2>/dev/null || echo '?')
echo -e "${CYAN}Remaining:${NC} $REMAINING_RES resources, $REMAINING_SITES sites"
if [[ $FAILURES -gt 0 ]]; then
echo -e "${YELLOW}Completed with $FAILURES failure(s).${NC}"
exit 2
fi
echo -e "${GREEN}Teardown complete.${NC}"

scripts/validate-env.sh

@@ -258,6 +258,47 @@ fi
echo ""
# --- 5b. Host Port Availability ---
# Detects processes already bound to ports we intend to use (e.g. cockpit on :9090).
echo "5b. Host Port Availability"
echo "--------------------------"
if ! command -v ss >/dev/null 2>&1; then
warn "ss not installed — skipping host port check (install iproute2 to enable)"
else
# Collect unique ports from PORT_MAP keys
HOST_CONFLICTS=()
for port in "${!PORT_MAP[@]}"; do
# ss -H (no header) -t (tcp) -l (listen) -n (numeric); match :PORT at end of local addr
# Also matches *:PORT and [::]:PORT
if ss -Htln 2>/dev/null | awk -v p=":$port" '$4 ~ p"$" {found=1} END{exit !found}'; then
owner="${PORT_MAP[$port]}"
# Process identification needs root; ss -tlnp includes users:(("name",pid=X,fd=Y))
proc=""
if [[ $EUID -eq 0 ]]; then
proc=$(ss -Htlnp 2>/dev/null | awk -v p=":$port" '$4 ~ p"$" {print; exit}' \
| grep -oP 'users:\(\("\K[^"]+' | head -1)
fi
if [[ -n "$proc" ]]; then
error "Port $port already in use by '$proc' (want: $owner)"
else
error "Port $port already in use on host (want: $owner) — run as root to identify the process"
fi
HOST_CONFLICTS+=("$port")
fi
done
if [[ ${#HOST_CONFLICTS[@]} -eq 0 ]]; then
ok "All configured host ports are available"
else
info "Common culprits: cockpit.socket (9090), systemd-resolved (53), apache2/nginx host (80/443)"
info "Fix: \`sudo systemctl stop <service> && sudo systemctl disable <service>\` or remap the port in .env"
fi
fi
echo ""
# --- 6. Feature Flag Consistency ---
echo "6. Feature Flag Consistency"