bunker-admin 2fa50b001c Merge changemaker-control-panel into v2 monorepo
Absorbs the separate control-panel git repo as a subdirectory.
Instances and backups directories excluded via .gitignore.

Bunker Admin
2026-02-21 11:51:45 -07:00

314 lines
9.2 KiB
TypeScript

import { Prisma, BackupStatus, AuditAction, InstanceStatus } from '@prisma/client';
import fs from 'fs/promises';
import path from 'path';
import crypto from 'crypto';
import { exec as execCb } from 'child_process';
import { promisify } from 'util';
import { prisma } from '../lib/prisma';
import { env } from '../config/env';
import { AppError } from '../middleware/error-handler';
import { decryptJson } from '../utils/encryption';
import * as docker from './docker.service';
import { logger } from '../utils/logger';
// Promisified child_process.exec so shell commands (gzip, tar) can be awaited.
const exec = promisify(execCb);
/**
 * Compute the SHA-256 digest of a file's contents.
 *
 * @param filePath - Absolute or relative path to the file.
 * @returns Lowercase hex-encoded SHA-256 hash.
 */
async function fileHash(filePath: string): Promise<string> {
  const contents = await fs.readFile(filePath);
  const hasher = crypto.createHash('sha256');
  hasher.update(contents);
  return hasher.digest('hex');
}
/**
 * Return the size of a file in bytes (via stat).
 *
 * @param filePath - Path to the file to measure.
 */
async function fileSize(filePath: string): Promise<number> {
  const { size } = await fs.stat(filePath);
  return size;
}
/**
 * Create a backup for a given instance.
 *
 * Validates that the instance exists, is RUNNING, and is managed by CCP,
 * then records a PENDING backup row and kicks off the actual backup work
 * in the background (fire-and-forget).
 *
 * @param instanceId - ID of the instance to back up.
 * @param userId - Optional acting user, forwarded for audit logging.
 * @param ipAddress - Optional client IP, forwarded for audit logging.
 * @returns The freshly created (PENDING) backup record.
 * @throws AppError 404 if the instance does not exist; 400 if it is not
 *   RUNNING or its backups are not CCP-managed.
 */
export async function createBackup(instanceId: string, userId?: string, ipAddress?: string) {
  const instance = await prisma.instance.findUnique({ where: { id: instanceId } });
  if (!instance) {
    throw new AppError(404, 'Instance not found', 'NOT_FOUND');
  }
  if (instance.status !== InstanceStatus.RUNNING) {
    throw new AppError(400, `Cannot backup instance in ${instance.status} state`, 'INVALID_STATE');
  }
  // NOTE: isRegistered is read through a cast — presumably a newer schema
  // field not yet reflected in the generated Prisma types.
  const maybeRegistered = instance as { isRegistered?: boolean };
  if (maybeRegistered.isRegistered) {
    throw new AppError(400, 'Backups not managed by CCP for registered instances', 'NOT_MANAGED');
  }

  // Record the backup as PENDING before any work begins.
  const backup = await prisma.backup.create({
    data: { instanceId, status: BackupStatus.PENDING },
  });

  // Run the heavy lifting without blocking the caller; failures are logged
  // here, and performBackup also persists FAILED status on the record.
  void performBackup(backup.id, instance, userId, ipAddress).catch((err) => {
    logger.error(`[backup] Backup ${backup.id} failed: ${(err as Error).message}`);
  });

  return backup;
}
/**
 * Perform the actual backup work for an instance (runs in the background,
 * fired from createBackup).
 *
 * Steps, in order:
 *   1. Mark the Backup row IN_PROGRESS.
 *   2. pg_dump the instance's Postgres via docker compose exec, then gzip it.
 *   3. tar up the instance's uploads/ directory if present.
 *   4. Write a manifest.json (file names, sizes, sha256 hashes).
 *   5. Bundle everything into one .tar.gz and remove the temp directory.
 *   6. Mark the row COMPLETED (or FAILED in the catch) and write an audit log.
 *
 * Rethrows the failing error after persisting FAILED status, so the
 * fire-and-forget .catch in createBackup can log it.
 */
async function performBackup(
backupId: string,
instance: { id: string; slug: string; basePath: string; composeProject: string; encryptedSecrets: string | null },
userId?: string,
ipAddress?: string
) {
// Filesystem-safe timestamp: ':' and '.' are replaced in the ISO string.
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
// Temp working directory; deleted once the final archive is built.
const backupDir = path.join(env.BACKUP_STORAGE_PATH, instance.slug, timestamp);
try {
// Update status to IN_PROGRESS
await prisma.backup.update({
where: { id: backupId },
data: { status: BackupStatus.IN_PROGRESS },
});
// Ensure backup directory exists
await fs.mkdir(backupDir, { recursive: true });
// Accumulates one entry per file that makes it into the backup.
const manifestFiles: Array<{ name: string; size: number; sha256: string }> = [];
// 1. Dump PostgreSQL
try {
const secrets = instance.encryptedSecrets
? decryptJson<Record<string, string>>(instance.encryptedSecrets)
: {} as Record<string, string>;
// NOTE(review): hard-coded fallback password 'changemaker' is used when
// neither secret key is present — confirm this default is intentional.
const pgPassword = secrets.V2_POSTGRES_PASSWORD || secrets.postgresPassword || 'changemaker';
// Use docker compose exec to run pg_dump inside the container
// Pass PGPASSWORD via -e flag so pg_dump can authenticate
const dumpOutput = await docker.composeExec(
instance.basePath,
instance.composeProject,
'v2-postgres',
`pg_dump -U changemaker -d changemaker`,
300_000, // 5 min timeout for large DBs
{ PGPASSWORD: pgPassword }
);
const dumpPath = path.join(backupDir, 'v2-postgres.sql');
await fs.writeFile(dumpPath, dumpOutput);
// Gzip the dump. Shell-interpolated path is built from config + slug +
// timestamp; assumes slug cannot contain quotes — TODO confirm upstream
// slug validation.
await exec(`gzip "${dumpPath}"`, { timeout: 120_000 });
const gzPath = dumpPath + '.gz';
manifestFiles.push({
name: 'v2-postgres.sql.gz',
size: await fileSize(gzPath),
sha256: await fileHash(gzPath),
});
logger.info(`[backup] ${instance.slug}: PostgreSQL dump complete`);
} catch (err) {
logger.warn(`[backup] ${instance.slug}: PostgreSQL dump failed: ${(err as Error).message}`);
// Continue with backup — NOTE(review): despite the original intent to
// "mark the dump as failed in manifest", nothing is recorded in
// manifestFiles here; the manifest simply omits the dump entry.
}
// 2. Archive uploads if they exist
const uploadsDir = path.join(instance.basePath, 'uploads');
try {
// fs.access throws when uploads/ is absent, jumping to the catch below.
await fs.access(uploadsDir);
const uploadsArchive = path.join(backupDir, 'uploads.tar.gz');
await exec(`tar -czf "${uploadsArchive}" -C "${instance.basePath}" uploads`, { timeout: 300_000 });
manifestFiles.push({
name: 'uploads.tar.gz',
size: await fileSize(uploadsArchive),
sha256: await fileHash(uploadsArchive),
});
logger.info(`[backup] ${instance.slug}: Uploads archive complete`);
} catch {
// No uploads directory or archive failed — skip
logger.debug(`[backup] ${instance.slug}: No uploads directory or archive skipped`);
}
// 3. Generate manifest describing everything captured above
const manifest = {
instanceId: instance.id,
instanceSlug: instance.slug,
timestamp: new Date().toISOString(),
files: manifestFiles,
};
const manifestPath = path.join(backupDir, 'manifest.json');
await fs.writeFile(manifestPath, JSON.stringify(manifest, null, 2));
// 4. Create final archive (dump + uploads + manifest)
const archiveName = `backup-${instance.slug}-${timestamp}.tar.gz`;
const archivePath = path.join(env.BACKUP_STORAGE_PATH, instance.slug, archiveName);
// -C into the parent so the archive contains just the timestamped directory.
await exec(`tar -czf "${archivePath}" -C "${path.dirname(backupDir)}" "${path.basename(backupDir)}"`, {
timeout: 300_000,
});
const totalSize = await fileSize(archivePath);
// Cleanup the temp directory — only the final .tar.gz is kept on disk.
await fs.rm(backupDir, { recursive: true, force: true });
// Update backup record with final location, size, and manifest.
await prisma.backup.update({
where: { id: backupId },
data: {
status: BackupStatus.COMPLETED,
archivePath,
sizeBytes: BigInt(totalSize),
manifest: manifest as unknown as Prisma.InputJsonValue,
completedAt: new Date(),
},
});
// Audit log (only when a triggering user is known)
if (userId) {
await prisma.auditLog.create({
data: {
userId,
instanceId: instance.id,
action: AuditAction.BACKUP_CREATE,
details: { backupId, archiveName, sizeBytes: totalSize },
ipAddress,
},
});
}
logger.info(`[backup] ${instance.slug}: Backup complete (${(totalSize / 1024 / 1024).toFixed(1)} MB)`);
} catch (err) {
// Update backup as failed.
// NOTE(review): if this update itself throws (e.g. DB unavailable), it
// masks the original error — consider wrapping it in its own try/catch.
await prisma.backup.update({
where: { id: backupId },
data: {
status: BackupStatus.FAILED,
errorMessage: (err as Error).message,
completedAt: new Date(),
},
});
// Cleanup temp directory on failure
try {
await fs.rm(backupDir, { recursive: true, force: true });
} catch {
// Ignore cleanup errors
}
// Rethrow so the caller's fire-and-forget .catch logs the failure too.
throw err;
}
}
/**
 * Delete a backup (file + DB record).
 *
 * The archive file is removed best-effort: a missing or undeletable file is
 * only logged and does not block removal of the database record. Writes an
 * audit entry when a userId is supplied.
 *
 * @throws AppError 404 if no backup exists with the given ID.
 */
export async function deleteBackup(backupId: string, userId?: string, ipAddress?: string) {
  const backup = await prisma.backup.findUnique({
    where: { id: backupId },
    include: { instance: { select: { id: true, slug: true } } },
  });
  if (!backup) {
    throw new AppError(404, 'Backup not found', 'NOT_FOUND');
  }

  // Best-effort removal of the archive on disk.
  if (backup.archivePath) {
    try {
      await fs.unlink(backup.archivePath);
    } catch {
      logger.warn(`[backup] Could not delete file: ${backup.archivePath}`);
    }
  }

  await prisma.backup.delete({ where: { id: backupId } });

  if (userId) {
    await prisma.auditLog.create({
      data: {
        userId,
        instanceId: backup.instanceId,
        action: AuditAction.BACKUP_DELETE,
        details: { backupId, instanceSlug: backup.instance?.slug },
        ipAddress,
      },
    });
  }
}
/**
 * List backups with optional instance filter and pagination.
 *
 * @param instanceId - When provided, restrict results to this instance.
 * @param page - 1-based page number; values below 1 are clamped to 1.
 * @param limit - Page size; values below 1 are clamped to 1.
 * @returns Page of backups (with a slim instance include), total count, and
 *   the sanitized page/limit actually applied.
 */
export async function listBackups(instanceId?: string, page = 1, limit = 50) {
  // Clamp pagination inputs: a non-positive page or limit would yield a
  // negative skip/take, which Prisma rejects at runtime.
  const safePage = Math.max(1, Math.floor(page));
  const safeLimit = Math.max(1, Math.floor(limit));
  const where = instanceId ? { instanceId } : {};
  const [data, total] = await Promise.all([
    prisma.backup.findMany({
      where,
      orderBy: { startedAt: 'desc' },
      skip: (safePage - 1) * safeLimit,
      take: safeLimit,
      include: {
        instance: { select: { id: true, name: true, slug: true } },
      },
    }),
    prisma.backup.count({ where }),
  ]);
  return { data, total, page: safePage, limit: safeLimit };
}
/**
 * Fetch a single backup by ID.
 *
 * @throws AppError 404 when no backup matches the given ID.
 */
export async function getBackup(backupId: string) {
  const found = await prisma.backup.findUnique({ where: { id: backupId } });
  if (found === null) {
    throw new AppError(404, 'Backup not found', 'NOT_FOUND');
  }
  return found;
}
/**
 * Cleanup backups older than retention period.
 *
 * Deletes both the archive file on disk and the database record for every
 * COMPLETED or FAILED backup started before the cutoff. A missing archive
 * file (already removed out-of-band) is treated as already-deleted; previously
 * the ENOENT from unlink aborted the iteration before the DB delete, so the
 * stale record was retried forever on every cleanup run.
 *
 * @param retentionDays - Age threshold in days; backups started earlier are removed.
 * @returns Number of backup records successfully removed.
 */
export async function cleanupOldBackups(retentionDays: number): Promise<number> {
  const cutoff = new Date(Date.now() - retentionDays * 24 * 60 * 60 * 1000);
  const oldBackups = await prisma.backup.findMany({
    where: {
      startedAt: { lt: cutoff },
      status: { in: [BackupStatus.COMPLETED, BackupStatus.FAILED] },
    },
  });
  let deleted = 0;
  for (const backup of oldBackups) {
    try {
      if (backup.archivePath) {
        try {
          await fs.unlink(backup.archivePath);
        } catch (err) {
          // A file that is already gone must not block record cleanup; any
          // other filesystem error (e.g. EACCES) still aborts this backup's
          // cleanup and is logged below.
          if ((err as NodeJS.ErrnoException).code !== 'ENOENT') {
            throw err;
          }
        }
      }
      await prisma.backup.delete({ where: { id: backup.id } });
      deleted++;
    } catch (err) {
      logger.warn(`[backup] Failed to cleanup backup ${backup.id}: ${(err as Error).message}`);
    }
  }
  if (deleted > 0) {
    logger.info(`[backup] Cleaned up ${deleted} old backups (>${retentionDays} days)`);
  }
  return deleted;
}