#!/bin/bash
# =============================================================================
# Media Manager - Startup Script
# =============================================================================
# This script starts all services including AI containers, then gracefully
# stops all AI containers except Ollama to free up GPU VRAM while keeping
# the LLM server available.
#
# Usage: ./start.sh [--quick|-q]   (--quick / -q skips image rebuilds)
# =============================================================================

# Fail fast: abort on command errors, on unset variables, and when any stage
# of a pipeline fails (plain `set -e` misses the latter two).
set -euo pipefail

# Parse command line arguments.
# SKIP_BUILD is non-empty ("true") when image builds should be skipped.
SKIP_BUILD=""
# "${1-}" defaults to empty so `set -u` does not abort when no args are given.
if [[ "${1-}" == "--quick" || "${1-}" == "-q" ]]; then
    SKIP_BUILD="true"
fi
# Colors for output. Stored as literal escape strings and expanded later via
# `echo -e`/`printf %b`. readonly: these are constants and must not change.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly BOLD='\033[1m'
readonly NC='\033[0m' # No Color
# Helper functions

# Print a section banner: the title framed by two horizontal rules, with a
# blank line above and below. %b interprets the \033 color escapes like echo -e.
print_header() {
    local rule="${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    printf '\n'
    printf '%b\n' "$rule"
    printf '%b\n' "${BOLD}${CYAN} $1${NC}"
    printf '%b\n' "$rule"
    printf '\n'
}
# Print an in-progress action line, prefixed with a green arrow.
print_step() {
    printf '%b\n' "${GREEN}▶${NC} $1"
}
# Print an informational line, prefixed with a cyan info mark.
print_info() {
    printf '%b\n' "${CYAN}ℹ${NC} $1"
}
# Print a warning line, prefixed with a yellow warning sign.
print_warning() {
    printf '%b\n' "${YELLOW}⚠${NC} $1"
}
# Print a success line, prefixed with a green check mark.
print_success() {
    printf '%b\n' "${GREEN}✓${NC} $1"
}
# AI containers to stop after startup (every AI service except Ollama, which
# stays up to serve LLM requests). readonly: the list is a constant; names
# must match the service names in docker-compose.
readonly -a AI_CONTAINERS_TO_STOP=(
    "whisper"
    "joycaption"
    "face-recognition"
    "yolo-detection"
    "joytag"
    "scene-detection"
    "transnetv2"
    "clip-embeddings"
)
# =============================================================================
# Main Script
# =============================================================================

# Title banner: open the color, print the literal box art, then reset.
printf '\n'
printf '%b\n' "${BOLD}${CYAN}"
printf '%s\n' \
    "   ╔══════════════════════════════════════════════════════════════════╗" \
    "   ║                                                                  ║" \
    "   ║                 Media Manager - Startup Script                   ║" \
    "   ║                                                                  ║" \
    "   ╚══════════════════════════════════════════════════════════════════╝"
printf '%b\n' "${NC}"

if [[ -n "$SKIP_BUILD" ]]; then
    print_info "Quick mode: skipping image builds"
fi
# =============================================================================
# Step 1: Build and Start AI Profile Containers
# =============================================================================

print_header "Step 1: Building and Starting AI Services"

# Quick mode reuses existing images; otherwise rebuild before starting.
if [[ -n "$SKIP_BUILD" ]]; then
    print_step "Running: docker compose --profile ai up -d (skipping build)"
    docker compose --profile ai up -d
else
    print_step "Running: docker compose --profile ai up -d --build"
    docker compose --profile ai up -d --build
fi

print_success "AI services started"
# =============================================================================
# Step 2: Build and Start Core Services
# =============================================================================

print_header "Step 2: Building and Starting Core Services"

# Same quick-mode switch as step 1, for the default (core) compose profile.
if [[ -n "$SKIP_BUILD" ]]; then
    print_step "Running: docker compose up -d (skipping build)"
    docker compose up -d
else
    print_step "Running: docker compose up -d --build"
    docker compose up -d --build
fi

print_success "Core services started"
# =============================================================================
# Step 3: Stop Non-Ollama AI Containers
# =============================================================================

print_header "Step 3: Stopping Non-Ollama AI Containers"

print_info "Keeping Ollama running for LLM inference"
printf '\n'

# Stop each GPU-hungry AI container; a failed stop (e.g. not running) is
# reported as a warning, not a fatal error. stderr is silenced intentionally.
for svc in "${AI_CONTAINERS_TO_STOP[@]}"; do
    print_step "Stopping $svc..."
    if ! docker compose stop "$svc" 2>/dev/null; then
        print_warning "$svc was not running or failed to stop"
    else
        print_success "$svc stopped"
    fi
done
# =============================================================================
# Summary
# =============================================================================

print_header "Startup Complete!"

# Grouped printf calls instead of one echo per line; %b interprets the \033
# color escapes exactly as `echo -e` would, and "" yields a blank line.
printf '%b\n' \
    "   ${GREEN}${BOLD}Services are ready!${NC}" \
    "" \
    "   Running services:" \
    "" \
    "   ${GREEN}✓${NC} Core Services" \
    "      - postgres (database)" \
    "      - api (backend)" \
    "      - admin (dashboard)" \
    "      - public (gallery)" \
    "      - nginx (video streaming)" \
    "" \
    "   ${GREEN}✓${NC} AI Services" \
    "      - ollama (LLM server)" \
    "" \
    "   ${YELLOW}○${NC} Stopped AI Services (start on-demand)"
for svc in "${AI_CONTAINERS_TO_STOP[@]}"; do
    printf '%b\n' "      - $svc"
done
printf '%b\n' \
    "" \
    "   Access the application at:" \
    "" \
    "   ${CYAN}Admin Dashboard:${NC}  http://localhost:8080" \
    "   ${CYAN}Public Gallery:${NC}   http://localhost:3080" \
    "   ${CYAN}API:${NC}              http://localhost:3001" \
    "   ${CYAN}Ollama:${NC}           http://localhost:11435" \
    "" \
    "   Start individual AI services as needed:" \
    "" \
    "     ${YELLOW}docker compose --profile ai start whisper${NC}" \
    "     ${YELLOW}docker compose --profile ai start joycaption${NC}" \
    "" \
    "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" \
    ""