Fixes surfaced by three rounds of fresh-install testing on marcelle: - config.sh: add host-port preflight check (ss -tln) to catch cockpit-on-9090 style collisions before compose up; add --skip-port-check escape hatch; add --install-watcher / --no-install-watcher / --install-backup-timer / --no-install-backup-timer flags; -y --enable-all now installs both systemd units by default (previously silently skipped); print resolved admin email in Configuration Complete block. - scripts/validate-env.sh: new section 5b "Host Port Availability" using ss-based detection, with process-name surfacing when run as root. - scripts/pangolin-teardown.sh: new wrapper. Reads credentials from .env or takes --api-url/--api-key/--org-id flags. Dry-run by default; --yes to execute. Deletes resources before sites (avoids orphans). --keep-site-ids for safety. - scripts/build-release.sh: include validate-env.sh and pangolin-teardown.sh in release tarball whitelist. - CCP instances.service.ts: deleteInstance() now calls teardownTunnel() before composeDown when pangolinSiteId is set. Previously an admin clicking "Delete Instance" orphaned the Pangolin site + all its resources. Best-effort with try/catch matching the existing Docker-cleanup tolerance pattern. - CLAUDE.md: sync drift — 44 → 50 migrations, 186 → 192 models, 40 → 44 modules. Bunker Admin
173 lines
6.0 KiB
Bash
Executable File
#!/bin/bash
|
|
# =============================================================================
|
|
# pangolin-teardown.sh — Delete all Pangolin resources + sites for an org
|
|
#
|
|
# Use when wiping a test environment before a fresh install. Idempotent.
|
|
#
|
|
# Credentials are read from .env (PANGOLIN_API_URL/PANGOLIN_API_KEY/PANGOLIN_ORG_ID)
|
|
# unless overridden by flags.
|
|
#
|
|
# Usage:
|
|
# ./scripts/pangolin-teardown.sh # dry-run (preview only)
|
|
# ./scripts/pangolin-teardown.sh --yes # actually delete
|
|
# ./scripts/pangolin-teardown.sh --yes \
|
|
# --api-url https://api.bnkserve.org/v1 \
|
|
# --api-key KEY --org-id cursed-knowledge
|
|
#
|
|
# Exit codes: 0 success, 1 error, 2 partial failure
|
|
# =============================================================================
|
|
set -euo pipefail

# Resolve script and project paths relative to this file's on-disk location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
ENV_FILE="${PROJECT_DIR}/.env"

# ANSI color codes for human-readable terminal output.
RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# CLI state — filled in by flag parsing below, with .env as the fallback.
API_URL=""
API_KEY=""
ORG_ID=""
CONFIRM=false        # --yes flips this; default is a dry run
KEEP_SITE_IDS=""     # comma-separated site ids to preserve
|
|
|
|
# usage: print this script's header comment block (file lines 2-20) as help
# text, stripping the leading "# " markers, then exit successfully.
usage() {
  sed -n '2,20p' "$0" | sed -e 's/^# \?//'
  exit 0
}
|
|
|
|
# parse_args: consume CLI flags into the globals declared above.
#   --yes / -y          execute deletions (default: dry run)
#   --api-url URL       override PANGOLIN_API_URL from .env
#   --api-key KEY       override PANGOLIN_API_KEY from .env
#   --org-id ID         override PANGOLIN_ORG_ID from .env
#   --keep-site-ids L   comma-separated site ids to skip
# "${2?...}" gives a clear error when a value-taking flag is last on the
# command line (previously `shift 2` just died silently under set -e).
# Unknown flags are reported on stderr.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --yes|-y)        CONFIRM=true; shift ;;
      --api-url)       API_URL="${2?--api-url requires a value}"; shift 2 ;;
      --api-key)       API_KEY="${2?--api-key requires a value}"; shift 2 ;;
      --org-id)        ORG_ID="${2?--org-id requires a value}"; shift 2 ;;
      --keep-site-ids) KEEP_SITE_IDS="${2?--keep-site-ids requires a value}"; shift 2 ;;
      -h|--help)       usage ;;
      *)               echo "Unknown flag: $1" >&2; exit 1 ;;
    esac
  done
}
parse_args "$@"
|
|
|
|
# Load any credential still missing after flag parsing from .env.
# read_env_var KEY — print KEY's value from $ENV_FILE (empty if absent).
# The trailing "|| true" is the fix: under `set -o pipefail` a key that is
# missing from .env makes grep (and thus the whole pipeline/substitution)
# exit non-zero, which would otherwise kill the script via `set -e` before
# the friendly "missing credentials" error below can fire.
read_env_var() {
  grep -E "^$1=" "$ENV_FILE" | head -1 | cut -d= -f2- || true
}

if [[ -z "$API_URL" || -z "$API_KEY" || -z "$ORG_ID" ]]; then
  if [[ -f "$ENV_FILE" ]]; then
    API_URL="${API_URL:-$(read_env_var PANGOLIN_API_URL)}"
    API_KEY="${API_KEY:-$(read_env_var PANGOLIN_API_KEY)}"
    ORG_ID="${ORG_ID:-$(read_env_var PANGOLIN_ORG_ID)}"
  fi
fi
|
|
|
|
# Abort early with a clear message when credentials are still incomplete.
# Diagnostics go to stderr so a scripted caller capturing stdout is not polluted.
if [[ -z "$API_URL" || -z "$API_KEY" || -z "$ORG_ID" ]]; then
  echo -e "${RED}ERROR:${NC} Missing Pangolin credentials." >&2
  echo " Provide via flags (--api-url/--api-key/--org-id) or set in .env:" >&2
  echo " PANGOLIN_API_URL, PANGOLIN_API_KEY, PANGOLIN_ORG_ID" >&2
  exit 1
fi

# python3 does all the JSON parsing below (jq is not assumed to be installed).
if ! command -v python3 >/dev/null 2>&1; then
  echo -e "${RED}ERROR:${NC} python3 is required for JSON parsing" >&2
  exit 1
fi
|
|
|
|
# Normalize the keep list: commas become spaces, and the whole string is
# wrapped in spaces so a substring match on " <id> " is an exact-id match.
KEEP_LIST=" ${KEEP_SITE_IDS//,/ } "

# Banner: show what is about to be touched and in which mode.
echo -e "${BOLD}Pangolin teardown${NC}"
echo " API: $API_URL"
echo " Org: $ORG_ID"
if [[ -n "$KEEP_SITE_IDS" ]]; then
  echo " Keep sites: $KEEP_SITE_IDS"
fi
if [[ "$CONFIRM" == "false" ]]; then
  echo -e " ${YELLOW}Mode: DRY RUN${NC} (pass --yes to execute)"
fi
echo ""
|
|
|
# --- List resources ---
# curl -f fails silently (empty output) on HTTP errors; we translate that
# into one readable error on stderr instead of a JSON-parser traceback.
RES_JSON=$(curl -sf -H "Authorization: Bearer $API_KEY" "$API_URL/org/$ORG_ID/resources" || echo '')
if [[ -z "$RES_JSON" ]]; then
  echo -e "${RED}ERROR:${NC} Failed to list resources (check API key + org id)" >&2
  exit 1
fi

# One row per resource: "<resourceId>\t<name>\t<fullDomain>".
RESOURCES=$(echo "$RES_JSON" | python3 -c "
import sys, json
d = json.load(sys.stdin)
for r in d.get('data', {}).get('resources', []):
    print(f\"{r['resourceId']}\t{r.get('name','?')}\t{r.get('fullDomain','')}\")
")

# grep -c exits 1 when it counts zero matches; '|| true' keeps set -e quiet
# for the legitimate empty-org case (it still prints "0").
RESOURCE_COUNT=$(echo -n "$RESOURCES" | grep -c . || true)
echo -e "${CYAN}Resources to delete: $RESOURCE_COUNT${NC}"
[[ -n "$RESOURCES" ]] && echo "$RESOURCES" | awk -F'\t' '{printf " - [%s] %s %s\n", $1, $2, $3}'
|
|
|
|
# --- List sites ---
SITES_JSON=$(curl -sf -H "Authorization: Bearer $API_KEY" "$API_URL/org/$ORG_ID/sites" || echo '')
# Mirror the resources check above: without it an API failure here surfaced
# as a python JSON traceback (and set -e killed the script mid-parse).
if [[ -z "$SITES_JSON" ]]; then
  echo -e "${RED}ERROR:${NC} Failed to list sites (check API key + org id)" >&2
  exit 1
fi

# One row per site: "<siteId>\t<name>\t<online>".
SITES=$(echo "$SITES_JSON" | python3 -c "
import sys, json
d = json.load(sys.stdin)
for s in d.get('data', {}).get('sites', []):
    print(f\"{s['siteId']}\t{s.get('name','?')}\t{s.get('online','?')}\")
")
|
|
|
|
# Filter out kept sites.
# filter_sites: read "<id>\t<name>\t<online>" rows on stdin; echo back only
# rows whose id is NOT in KEEP_LIST. Emits real tab characters via printf
# (the old code stored literal "\t" escapes and relied on a later `echo -e`
# to re-expand them, which would also mangle any backslashes in site names).
filter_sites() {
  local sid name online
  while IFS=$'\t' read -r sid name online; do
    if [[ "$KEEP_LIST" == *" $sid "* ]]; then
      continue
    fi
    printf '%s\t%s\t%s\n' "$sid" "$name" "$online"
  done
}

FILTERED_SITES=""
if [[ -n "$SITES" ]]; then
  FILTERED_SITES=$(filter_sites <<< "$SITES")
fi

# grep -c exits 1 on zero matches; '|| true' tolerates the empty case.
SITE_COUNT=$(echo -n "$FILTERED_SITES" | grep -c . || true)
echo ""
echo -e "${CYAN}Sites to delete: $SITE_COUNT${NC}"
# echo -e is harmless on real tabs and kept for display parity; NF>1 skips blanks.
[[ -n "$FILTERED_SITES" ]] && echo -e "$FILTERED_SITES" | awk -F'\t' 'NF>1 {printf " - [%s] %s online=%s\n", $1, $2, $3}'
|
|
|
|
# Dry-run gate: without --yes the preview above is all we do — stop here
# with success so the script is safe to run casually.
if [[ "$CONFIRM" == "false" ]]; then
  echo ""
  echo -e "${YELLOW}Dry run complete.${NC} Re-run with --yes to actually delete."
  exit 0
fi

echo ""
echo -e "${BOLD}Deleting...${NC}"

# Count of failed DELETE calls; non-zero yields exit code 2 at the end.
FAILURES=0
|
|
|
|
# Delete resources first: resources reference sites, so removing them before
# the sites avoids orphaned references on the Pangolin side.
if [[ -n "$RESOURCES" ]]; then
  while IFS=$'\t' read -r rid name domain; do
    [[ -z "$rid" ]] && continue
    # '|| true': a transport-level curl failure (timeout, DNS, reset) must be
    # tallied as one failure below — without it the failed command
    # substitution aborts the whole run via set -e mid-loop.
    code=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE "$API_URL/resource/$rid" -H "Authorization: Bearer $API_KEY" || true)
    if [[ "$code" == "200" || "$code" == "204" ]]; then
      echo -e " ${GREEN}OK${NC} resource $rid ($name) deleted"
    else
      echo -e " ${RED}FAIL${NC} resource $rid ($name) HTTP $code"
      FAILURES=$((FAILURES + 1))
    fi
  done <<< "$RESOURCES"
fi
|
|
|
|
# Delete sites (after resources, since resources reference sites).
if [[ -n "$FILTERED_SITES" ]]; then
  while IFS=$'\t' read -r sid name online; do
    [[ -z "$sid" ]] && continue
    # '|| true': tally a transport-level curl failure below instead of letting
    # the failed command substitution kill the script via set -e.
    code=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE "$API_URL/site/$sid" -H "Authorization: Bearer $API_KEY" || true)
    if [[ "$code" == "200" || "$code" == "204" ]]; then
      echo -e " ${GREEN}OK${NC} site $sid ($name) deleted"
    else
      echo -e " ${RED}FAIL${NC} site $sid ($name) HTTP $code"
      FAILURES=$((FAILURES + 1))
    fi
  # echo -e expands any literal \t escapes FILTERED_SITES may contain into
  # real separators before the loop splits on tabs (no-op on real tabs).
  done <<< "$(echo -e "$FILTERED_SITES")"
fi
|
|
|
|
echo ""
# Post-delete verification: re-query both endpoints and report what is left.
# remaining <kind>: print the count of <kind> ("resources"/"sites") still in
# the org, or '?' if the query or parse fails — verification never aborts,
# since the teardown itself has already happened.
remaining() {
  curl -sf -H "Authorization: Bearer $API_KEY" "$API_URL/org/$ORG_ID/$1" \
    | python3 -c "import sys,json; print(len(json.load(sys.stdin).get('data',{}).get('$1',[])))" 2>/dev/null \
    || echo '?'
}
REMAINING_RES=$(remaining resources)
REMAINING_SITES=$(remaining sites)
echo -e "${CYAN}Remaining:${NC} $REMAINING_RES resources, $REMAINING_SITES sites"

# Exit 2 signals a partial teardown (see header's exit-code contract).
if [[ $FAILURES -gt 0 ]]; then
  echo -e "${YELLOW}Completed with $FAILURES failure(s).${NC}"
  exit 2
fi

echo -e "${GREEN}Teardown complete.${NC}"
|