Follows the listmonk-init pattern: an alpine:3 container that runs once after NocoDB is healthy, calls the REST API to register changemaker_v2 as a browsable data source, and exits. Idempotent — it exits immediately if the base already has tables, and guards against duplicate sources during asynchronous table discovery.
210 lines · 7.2 KiB · Bash · Executable file
#!/bin/sh
# nocodb-init: one-shot bootstrap that registers the changemaker_v2 Postgres
# database as a browsable NocoDB base/source via the REST API, then exits.
#
# Required environment (read later in this script):
#   NOCODB_URL, NC_ADMIN_EMAIL, NC_ADMIN_PASSWORD,
#   DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME
set -e

# Tag prepended to every log/error line so output is attributable in
# aggregated container logs.
PREFIX="[nocodb-init]"
# ─── Utilities ────────────────────────────────────────────────────────────────

# Emit an informational message tagged with the script prefix.
log() { printf '%s %s\n' "$PREFIX" "$1"; }

# Emit an error to stderr and abort the whole init run with status 1.
fail() {
  printf '%s ERROR: %s\n' "$PREFIX" "$1" >&2
  exit 1
}
# api METHOD /path [json-data]
#
# Issue an authenticated JSON request against the NocoDB REST API.
# Reads globals: NOCODB_URL, TOKEN. Prints the response body to stdout;
# curl -sf makes any HTTP error return non-zero (callers rely on that
# together with set -e / explicit `|| true`).
api() {
  # Underscore-prefixed names avoid clobbering unrelated globals
  # (the previous version overwrote METHOD/ENDPOINT/DATA on every call),
  # and ${3:-} keeps the function safe if `set -u` is ever enabled.
  _api_method="$1"; _api_path="$2"; _api_data="${3:-}"

  # Build the curl argument list once instead of duplicating the whole
  # invocation for the with-body / without-body cases.
  set -- -sf -X "$_api_method" "${NOCODB_URL}${_api_path}" \
    -H "Content-Type: application/json" \
    -H "xc-auth: ${TOKEN}"
  if [ -n "$_api_data" ]; then
    set -- "$@" -d "$_api_data"
  fi
  curl "$@"
}
# ─── Install dependencies ─────────────────────────────────────────────────────

# curl is used for every API call, jq for all JSON parsing/building.
# apk output is discarded, so on failure surface an explicit error instead of
# letting set -e kill the script with no context at all.
apk add --no-cache curl jq >/dev/null 2>&1 || fail "apk add curl jq failed"
log "Dependencies installed"
# ─── Step 1: Authenticate ─────────────────────────────────────────────────────

log "Authenticating with NocoDB..."

# Build the signin body with jq so credentials containing quotes or
# backslashes cannot break the JSON (the old inline string interpolated
# them raw into the payload).
SIGNIN_PAYLOAD=$(jq -n \
  --arg email "$NC_ADMIN_EMAIL" \
  --arg password "$NC_ADMIN_PASSWORD" \
  '{email: $email, password: $password}')

TOKEN=""
for i in $(seq 1 15); do
  # -sf: quiet, non-zero on HTTP errors; `|| true` keeps set -e from
  # aborting the retry loop while NocoDB is still starting up.
  RESPONSE=$(curl -sf -X POST "${NOCODB_URL}/api/v1/auth/user/signin" \
    -H "Content-Type: application/json" \
    -d "$SIGNIN_PAYLOAD" 2>/dev/null) || true

  if [ -n "$RESPONSE" ]; then
    TOKEN=$(echo "$RESPONSE" | jq -r '.token // empty')
    if [ -n "$TOKEN" ]; then
      break
    fi
  fi

  # Skip the wait message and the 3s sleep after the final failed attempt —
  # the old version slept once more for nothing before giving up.
  if [ "$i" -lt 15 ]; then
    log " Waiting for NocoDB to be ready... (attempt $i/15)"
    sleep 3
  fi
done

if [ -z "$TOKEN" ]; then
  fail "Could not authenticate with NocoDB after 15 attempts"
fi
log "Authenticated"
# ─── Step 2: Check for existing base ──────────────────────────────────────────
# Idempotency gate: decide whether there is anything left to do at all.
# Three outcomes:
#   1. base exists with tables        -> fully initialized, exit 0
#   2. base exists + integration src  -> sync in flight, wait for it, exit 0
#   3. base missing / src missing     -> fall through to steps 3-4

BASE_NAME="Changemaker Platform"
INTEGRATION_NAME="Changemaker PostgreSQL"

log "Checking for existing base..."
BASES=$(api GET "/api/v2/meta/bases/")
# Match by title; `// empty` yields "" rather than "null" when absent.
EXISTING_BASE_ID=$(echo "$BASES" | jq -r --arg name "$BASE_NAME" '.list[] | select(.title == $name) | .id // empty' 2>/dev/null)

if [ -n "$EXISTING_BASE_ID" ]; then
  # Base exists — check if it has tables (fully synced)
  TABLES=$(api GET "/api/v2/meta/bases/${EXISTING_BASE_ID}/tables")
  TABLE_COUNT=$(echo "$TABLES" | jq -r '.list | length' 2>/dev/null)

  # `2>/dev/null` on the numeric test guards against TABLE_COUNT being
  # empty/non-numeric when the API call or jq parse failed.
  if [ "$TABLE_COUNT" -gt 0 ] 2>/dev/null; then
    log "Base '${BASE_NAME}' already exists with ${TABLE_COUNT} tables. Nothing to do."
    exit 0
  fi

  # Check if an integration-linked source already exists (sync may still be in progress)
  # NocoDB auto-creates a default source (fk_integration_id=null, is_local=true) — ignore it
  SOURCES=$(api GET "/api/v2/meta/bases/${EXISTING_BASE_ID}/sources" 2>/dev/null) || true
  HAS_REAL_SOURCE=$(echo "$SOURCES" | jq -r '[.list[] | select(.fk_integration_id != null)] | length' 2>/dev/null)

  if [ "$HAS_REAL_SOURCE" -gt 0 ] 2>/dev/null; then
    # A real source is attached but no tables yet: another run (or NocoDB
    # itself) kicked off discovery. Poll up to 30 x 3s = 90s, then leave it.
    log "Base has an integration source — table sync in progress, waiting..."
    for i in $(seq 1 30); do
      TABLES=$(api GET "/api/v2/meta/bases/${EXISTING_BASE_ID}/tables" 2>/dev/null) || true
      TABLE_COUNT=$(echo "$TABLES" | jq -r '.list | length' 2>/dev/null)
      if [ "$TABLE_COUNT" -gt 0 ] 2>/dev/null; then
        log "Table sync complete: ${TABLE_COUNT} tables discovered"
        exit 0
      fi
      sleep 3
    done
    # Discovery is async server-side; exiting 0 here is deliberate —
    # the source exists, so re-adding it would create a duplicate.
    log "Source exists, tables will appear eventually. Nothing more to do."
    exit 0
  fi

  # Outcome 3: keep EXISTING_BASE_ID set so step 4 reuses this base.
  log "Base exists but has no integration source — will add one"
fi
# ─── Step 3: Create or reuse integration ──────────────────────────────────────
# An "integration" is NocoDB's stored connection definition; the source added
# in step 4 references it by id.

log "Checking for existing integration..."
INTEGRATIONS=$(api GET "/api/v2/meta/integrations")
INTEGRATION_ID=$(echo "$INTEGRATIONS" | jq -r --arg name "$INTEGRATION_NAME" '.list[] | select(.title == $name) | .id // empty' 2>/dev/null)

if [ -z "$INTEGRATION_ID" ]; then
  log "Creating integration '${INTEGRATION_NAME}'..."
  # Build the payload with jq so credentials containing quotes, backslashes
  # or other JSON metacharacters cannot corrupt it (the old heredoc
  # interpolated the DB_* values raw into the JSON text).
  INTEGRATION_PAYLOAD=$(jq -n \
    --arg title "$INTEGRATION_NAME" \
    --arg host "$DB_HOST" \
    --arg port "$DB_PORT" \
    --arg user "$DB_USER" \
    --arg password "$DB_PASSWORD" \
    --arg database "$DB_NAME" \
    '{
      title: $title,
      type: "database",
      sub_type: "pg",
      config: {
        client: "pg",
        connection: {
          host: $host,
          port: ($port | tonumber),
          user: $user,
          password: $password,
          database: $database
        }
      }
    }')
  INTEGRATION_RESULT=$(api POST "/api/v2/meta/integrations" "$INTEGRATION_PAYLOAD")
  INTEGRATION_ID=$(echo "$INTEGRATION_RESULT" | jq -r '.id // empty')

  if [ -z "$INTEGRATION_ID" ]; then
    fail "Failed to create integration"
  fi
  log "Integration created: ${INTEGRATION_ID}"
else
  log "Reusing existing integration: ${INTEGRATION_ID}"
fi
# ─── Step 4: Create base + add source ────────────────────────────────────────
# NocoDB v0.301: inline sources in POST /bases/ are ignored.
# Must create base first, then add source via POST /bases/{id}/sources.

# Build the source definition with jq so DB credentials containing quotes,
# backslashes or other JSON metacharacters cannot corrupt the payload
# (the old heredoc interpolated the DB_* values raw).
# inflection "none" keeps table/column names exactly as they are in Postgres.
SOURCE_PAYLOAD=$(jq -n \
  --arg integration_id "$INTEGRATION_ID" \
  --arg host "$DB_HOST" \
  --arg port "$DB_PORT" \
  --arg user "$DB_USER" \
  --arg password "$DB_PASSWORD" \
  --arg database "$DB_NAME" \
  '{
    type: "pg",
    fk_integration_id: $integration_id,
    inflection_column: "none",
    inflection_table: "none",
    config: {
      client: "pg",
      connection: {
        host: $host,
        port: ($port | tonumber),
        user: $user,
        password: $password,
        database: $database
      }
    }
  }')
if [ -z "$EXISTING_BASE_ID" ]; then
  log "Creating base '${BASE_NAME}'..."
  # jq-built payload: a base title containing quotes would break the old
  # hand-rolled "{\"title\":...}" JSON string.
  BASE_PAYLOAD=$(jq -n --arg title "$BASE_NAME" '{title: $title}')
  BASE_RESULT=$(api POST "/api/v2/meta/bases/" "$BASE_PAYLOAD")
  BASE_ID=$(echo "$BASE_RESULT" | jq -r '.id // empty')

  if [ -z "$BASE_ID" ]; then
    fail "Failed to create base"
  fi
  log "Base created: ${BASE_ID}"
else
  # Step 2 found the base but no integration source — reuse it.
  BASE_ID="$EXISTING_BASE_ID"
fi

log "Adding PostgreSQL source to base..."
# curl -sf inside api() returns non-zero on HTTP error, so under set -e a
# failed attach aborts the script here rather than silently continuing.
api POST "/api/v2/meta/bases/${BASE_ID}/sources" "$SOURCE_PAYLOAD" >/dev/null
log "Source added — table discovery started"
# ─── Step 5: Wait for table sync ─────────────────────────────────────────────
# Table discovery is asynchronous server-side; poll up to 30 x 3s = 90s.

log "Waiting for table discovery..."
ATTEMPT=0
while [ "$ATTEMPT" -lt 30 ]; do
  ATTEMPT=$((ATTEMPT + 1))

  TABLES=$(api GET "/api/v2/meta/bases/${BASE_ID}/tables" 2>/dev/null) || true
  TABLE_COUNT=$(printf '%s' "$TABLES" | jq -r '.list | length' 2>/dev/null)

  # 2>/dev/null guards the numeric test against an empty/non-numeric count.
  if [ "$TABLE_COUNT" -gt 0 ] 2>/dev/null; then
    log "Table sync complete: ${TABLE_COUNT} tables discovered"
    break
  fi

  if [ "$ATTEMPT" -eq 30 ]; then
    # Not fatal: the source is attached, NocoDB will finish on its own.
    log "Table sync still in progress after 90s — tables will appear eventually"
    break
  fi

  sleep 3
done
# ─── Step 6: Clean up default empty base ──────────────────────────────────────
# NocoDB creates an empty base titled "Base" on first boot; drop it so the
# UI only shows the real one.

log "Checking for default empty base..."
# The base list may have changed since step 2 — fetch a fresh copy.
ALL_BASES=$(api GET "/api/v2/meta/bases/")
DEFAULT_BASE_ID=$(printf '%s' "$ALL_BASES" | jq -r '.list[] | select(.title == "Base") | .id // empty' 2>/dev/null)

case "$DEFAULT_BASE_ID" in
  ""|"$BASE_ID")
    # Nothing to delete, or the "default" is actually our own base.
    ;;
  *)
    log "Deleting default empty base..."
    # Best-effort: a failed delete must not fail the whole init run.
    api DELETE "/api/v2/meta/bases/${DEFAULT_BASE_ID}" >/dev/null 2>&1 || true
    log "Default base cleaned up"
    ;;
esac

log "Done"