# =============================================================================
# REQUIRED SECRETS - You MUST change these before starting!
# =============================================================================

# PostgreSQL password - REQUIRED
# Generate: openssl rand -base64 24
POSTGRES_PASSWORD=CHANGE_ME_REQUIRED

# JWT secret for authentication - REQUIRED (min 32 characters)
# Generate: openssl rand -hex 32
JWT_SECRET=CHANGE_ME_REQUIRED_MIN_32_CHARS
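
# One way to fill in both secrets in place (a sketch; assumes GNU sed and that
# this file is saved as .env):
#   sed -i "s|^POSTGRES_PASSWORD=.*|POSTGRES_PASSWORD=$(openssl rand -base64 24)|" .env
#   sed -i "s|^JWT_SECRET=.*|JWT_SECRET=$(openssl rand -hex 32)|" .env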

# Initial admin account (created on first startup if no admin exists)
INITIAL_ADMIN_EMAIL=admin@example.com
INITIAL_ADMIN_PASSWORD=ChangeMe123!

# SMTP Configuration (required for email verification and password reset)
# Leave empty to disable email features (admin approval will be used instead)
SMTP_HOST=smtp.example.com
SMTP_PORT=587
SMTP_SECURE=false
SMTP_USER=your-smtp-username
SMTP_PASS=your-smtp-password
SMTP_FROM=noreply@example.com
SMTP_FROM_NAME=Media Manager
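
# Illustrative example (Gmail over STARTTLS with an app password; values are
# placeholders, not project defaults):
# SMTP_HOST=smtp.gmail.com
# SMTP_PORT=587
# SMTP_SECURE=false   # STARTTLS upgrade on 587; 'true' is for implicit TLS on 465
# SMTP_USER=you@gmail.com
# SMTP_PASS=your-app-password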

# Email settings
EMAIL_VERIFICATION_EXPIRY_HOURS=24
PASSWORD_RESET_EXPIRY_HOURS=1

# Application URL (used in email links)
APP_BASE_URL=http://localhost:3080

# CORS allowed origins (comma-separated)
# Add your production domains here
ALLOWED_ORIGINS=http://localhost:3080,http://localhost:8080
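
# Production example (illustrative domain; list every origin the UI is served
# from, scheme included):
# ALLOWED_ORIGINS=https://media.example.com,https://www.media.example.com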

# Timezone for API server (logs and server-side timestamps)
# See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=UTC

# Locale for date/time formatting (optional, defaults to browser locale)
# Examples: en-US, en-GB, de-DE, fr-FR
VITE_DEFAULT_LOCALE=

# =============================================================================
# MAXMIND GEOIP CONFIGURATION (optional)
# =============================================================================
# Provides geographic location data for session analytics
# Download GeoLite2-City.mmdb from: https://dev.maxmind.com/geoip/geolite2-free-geolocation-data
# Place the file in ./data/GeoLite2-City.mmdb (mounted to /app/data in container)
# Register for a free account to download: https://www.maxmind.com/en/geolite2/signup
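
# Example placement after downloading from your MaxMind account (assumes you
# run docker compose from the project root):
#   mkdir -p ./data
#   mv ~/Downloads/GeoLite2-City.mmdb ./data/GeoLite2-City.mmdb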

# =============================================================================
# CONTENT SAFETY CONFIGURATION (requires --profile safety)
# =============================================================================
# Set to false to disable LLM-based content safety checking
SAFETY_CHECK_ENABLED=true

# Admin email for high-severity content alerts (e.g., S4 Child Sexual Exploitation)
# If not set, falls back to SMTP_FROM
# Leave empty to disable high-severity email alerts
ADMIN_ALERT_EMAIL=

# =============================================================================
# OLLAMA CONFIGURATION
# =============================================================================
# Base URL for Ollama API (shared by safety and digest features)
OLLAMA_BASE_URL=http://ollama:11434

# Content Safety Model (Llama Guard for chat moderation)
OLLAMA_MODEL=llama-guard3:1b

# =============================================================================
# DIGEST FEATURE MODELS (requires --profile digest)
# =============================================================================
# Video analysis pipeline using vision-language models and speech recognition
#
# Vision Model - analyzes video frames (requires a vision-language model)
# Used for: frame description, visual element detection, scene classification
# Options:
# - huihui_ai/qwen3-vl-abliterated:2b (uncensored, ~2GB VRAM) [RECOMMENDED]
# - qwen2-vl:7b (censored, ~7GB VRAM)
# - llava:7b (censored, ~7GB VRAM)
DIGEST_VISION_MODEL=huihui_ai/qwen3-vl-abliterated:2b

# Text Model - synthesizes analysis results (text-only LLM)
# Used for: tag extraction, transcript analysis, final synthesis
# Options:
# - huihui_ai/qwen3-abliterated:4b (uncensored, ~4GB VRAM) [RECOMMENDED]
# - qwen2.5:7b (censored, ~7GB VRAM)
# - llama3.1:8b (censored, ~8GB VRAM)
DIGEST_TEXT_MODEL=huihui_ai/qwen3-abliterated:4b

# Digest service URLs
DIGEST_OLLAMA_URL=http://ollama:11434
DIGEST_WHISPER_URL=http://whisper:5000

# Frame extraction interval (seconds between frames)
DIGEST_FRAME_INTERVAL=30
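
# Rough cost intuition: at a 30 s interval, a 10-minute video yields ~20 frames
# for vision analysis (600 s / 30 s); halving the interval doubles that.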

# =============================================================================
# JOYCAPTION VISION BACKEND (requires --profile joycaption)
# =============================================================================
# Alternative to Ollama vision models - uses JoyCaption GGUF via llama.cpp
# Better quality image captioning; fits on 6GB VRAM with Q3_K_M quantization
#
# Vision backend type: 'ollama' (default) or 'llama-server' (JoyCaption)
DIGEST_VISION_BACKEND=ollama

# JoyCaption server URL (when using the llama-server backend)
JOYCAPTION_URL=http://joycaption:8080
JOYCAPTION_MODEL=llama-joycaption-beta-one-hf-llava

# Fall back to Ollama vision if JoyCaption fails
JOYCAPTION_FALLBACK_TO_OLLAMA=true

# JoyCaption model files (place in ./models directory)
# Download from: https://huggingface.co/Mungert/llama-joycaption-beta-one-hf-llava-GGUF
# Quantization options:
# - Q2_K:   ~3.2GB model, ~4GB VRAM (good quality, safest for 6GB GPU)
# - Q3_K_M: ~4GB model, ~5GB VRAM (better quality, tight fit on 6GB)
# - Q4_K_M: ~5GB model, ~6GB VRAM (best quality, needs 8GB+ GPU)
JOYCAPTION_MODEL_PATH=/models/llama-joycaption-beta-one-hf-llava.Q2_K.gguf
JOYCAPTION_MMPROJ_PATH=/models/llama-joycaption-beta-one-llava-mmproj-model-f16.gguf
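
# Example download (a sketch using the Hugging Face CLI; file names are assumed
# to match the paths above -- verify against the repo's file list):
#   pip install -U huggingface_hub
#   huggingface-cli download Mungert/llama-joycaption-beta-one-hf-llava-GGUF \
#     llama-joycaption-beta-one-hf-llava.Q2_K.gguf \
#     llama-joycaption-beta-one-llava-mmproj-model-f16.gguf --local-dir ./models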

# llama-server configuration
JOYCAPTION_CTX_SIZE=4096
JOYCAPTION_GPU_LAYERS=999
JOYCAPTION_PARALLEL=1

# =============================================================================
# WHISPER TRANSCRIPTION
# =============================================================================
# Model sizes: tiny, base, small, medium, large-v2, large-v3
# Larger = more accurate but slower and more VRAM
WHISPER_MODEL=base
WHISPER_DEVICE=cuda
WHISPER_COMPUTE=float16
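
# CPU-only fallback (assuming a faster-whisper/CTranslate2 backend, where int8
# is the usual CPU compute type):
# WHISPER_DEVICE=cpu
# WHISPER_COMPUTE=int8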

# =============================================================================
# OLLAMA AUTO-PULL MODELS
# =============================================================================
# Comma-separated list of models to pull on Ollama container startup
# Include all models needed for your enabled features
#
# Safety profile only:
# OLLAMA_MODELS=llama-guard3:1b
#
# Digest profile only:
# OLLAMA_MODELS=huihui_ai/qwen3-vl-abliterated:2b,huihui_ai/qwen3-abliterated:4b
#
# Both profiles (recommended):
OLLAMA_MODELS=llama-guard3:1b,huihui_ai/qwen3-vl-abliterated:2b,huihui_ai/qwen3-abliterated:4b
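
# Models can also be pulled by hand once the container is up (assumes the
# Ollama service is named "ollama", matching the URLs above):
#   docker compose --profile safety --profile digest up -d
#   docker compose exec ollama ollama pull llama-guard3:1b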

# =============================================================================
# JOB QUEUE CONFIGURATION
# =============================================================================
# GPU resource management for preventing job conflicts
# The system automatically queues GPU-intensive jobs to prevent VRAM exhaustion

# Maximum GPU VRAM in MB (default: 6000 for RTX 4050)
# Adjust based on your GPU: RTX 3090 = 24000, RTX 4080 = 16000, etc.
MAX_GPU_VRAM=6000
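
# To check your card's total VRAM before setting this:
#   nvidia-smi --query-gpu=memory.total --format=csv,noheader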

# =============================================================================
# FFMPEG ENCODING CONCURRENCY
# =============================================================================
# Number of parallel FFmpeg encode sessions for clip/scene generation.
# NVENC supports 3-5 concurrent sessions on consumer RTX GPUs (~200-400MB VRAM each).
# Set to 1 to restore sequential behavior.
FFMPEG_ENCODE_CONCURRENCY=2

# Disable job queue for immediate execution (legacy behavior)
# Set to 'true' to bypass the queue manager entirely
# DISABLE_JOB_QUEUE=false

# =============================================================================
# INBOX FILE WATCHER CONFIGURATION
# =============================================================================
# Automatically watches the inbox directory for new video files.
# When a new file is fully written, it is scanned into the library
# and optionally triggers auto-digest if auto_digest_enabled is true.

# Enable/disable file watcher (default: false, can also be enabled via admin UI)
# Set to 'true' to auto-detect new files added to the inbox directory
FILE_WATCHER_ENABLED=false

# Time in ms to wait for file size to stabilize before processing (default: 2000)
# Increase for slower network drives or when copying large files
FILE_WATCHER_DEBOUNCE_MS=2000

# =============================================================================
# CONTAINER LIFECYCLE CONFIGURATION
# =============================================================================
# By default, the system starts AI containers on-demand and stops them after use
# to minimize VRAM usage. Set to 'true' to disable this behavior when all AI
# containers are configured to run continuously (e.g., models fit in VRAM).
#
# When enabled:
# - ensureContainerRunning() only checks health, doesn't start containers
# - stopContainer() becomes a no-op, containers keep running
# - You must manually start containers: docker compose --profile ai up -d
DISABLE_CONTAINER_LIFECYCLE=false

# =============================================================================
# COMBINED SCENE DETECTION CONFIGURATION
# =============================================================================
# Uses three methods: TransNetV2 (neural), PySceneDetect (histogram), CLIP (semantic)
# Results are merged using configurable strategies for more accurate scene boundaries.
#
# Enable/disable combined detection (default: true)
# When disabled, falls back to PySceneDetect only
ENABLE_COMBINED_SCENE_DETECTION=true

# Merge strategy for combining cuts from multiple detectors
# Options: weighted (default), union, intersection, majority
# - weighted: uses detector confidence weights (best balance)
# - union: includes all cuts (most comprehensive, may over-segment)
# - intersection: only cuts found by all 3 detectors (highest confidence)
# - majority: cuts found by 2+ detectors
SCENE_MERGE_STRATEGY=weighted

# Tolerance window for grouping cuts (seconds)
# Cuts within this window are considered "the same" cut
SCENE_MERGE_TOLERANCE=0.75

# Run GPU detectors in parallel (requires >12GB VRAM)
# When false (default), runs sequentially: PyScene -> TransNet -> CLIP
SCENE_DETECTION_PARALLEL=false

# Detector weights for the weighted merge strategy
# Higher weight = more trusted detector
SCENE_WEIGHT_TRANSNET=1.0       # Neural network, best for soft transitions
SCENE_WEIGHT_PYSCENEDETECT=0.8  # Histogram-based, fast, good for hard cuts
SCENE_WEIGHT_CLIP=0.9           # Visual embeddings, semantic/content changes
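
# Worked illustration (assuming the weighted strategy scores a candidate cut by
# summing the weights of detectors that agree within the tolerance window):
# a cut seen by TransNetV2 + CLIP scores 1.0 + 0.9 = 1.9, outranking a cut
# reported only by PySceneDetect (0.8).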

# CLIP boundary detection configuration
# Extracts frames at an interval, computes embeddings, detects similarity drops
CLIP_BOUNDARY_FRAME_INTERVAL=0.5        # Seconds between frames (0.5 = 2 fps)
CLIP_BOUNDARY_SIMILARITY_THRESHOLD=0.7  # Below this = scene boundary (0-1)
CLIP_BOUNDARY_MIN_GAP=2.0               # Minimum seconds between boundaries

# CLIP embeddings GPU batch size (default: 128)
# Higher = faster processing, more VRAM usage
# RTX 4050 6GB: safe up to 512, recommended 256
# RTX 3090 24GB: can use 1024+
CLIP_GPU_BATCH_SIZE=256

# Database path (inside container)
DATABASE_URL=/app/data/library.db

# Media library paths (host paths)
MEDIA_ROOT=/media/bunker-admin/Internal/plex/xxx/media
MEDIA_LOCAL=/media/bunker-admin/Internal/plex/xxx/media/local
MEDIA_PUBLIC=/media/bunker-admin/Internal/plex/xxx/media/public
STUDIOS_PATH=/media/bunker-admin/Internal/plex/xxx/media/local/studios
GIFS_PATH=/media/bunker-admin/Internal/plex/xxx/media/local/gifs
PLAYBACK_PATH=/media/bunker-admin/Internal/plex/xxx/media/public/playback
COMPILATIONS_PATH=/media/bunker-admin/Internal/plex/xxx/media/public/compilations
PUBLIC_CURATED_PATH=/media/bunker-admin/Internal/plex/xxx/media/public/curated
QUICKIES_PATH=/media/bunker-admin/Internal/plex/xxx/media/public/quickies

# =============================================================================
# PAYMENT SYSTEM CONFIGURATION
# =============================================================================
# IMAP settings for automatic e-transfer email parsing
# Leave empty to disable automatic payment polling (manual entry only)
#
# For Proton Mail: requires Proton Mail Bridge running locally
# PAYMENT_IMAP_HOST=127.0.0.1 (or host.docker.internal from the container)
# PAYMENT_IMAP_PORT=1143
# PAYMENT_IMAP_TLS=false (Bridge uses STARTTLS)
#
# For Gmail: use an App Password (Google has discontinued "Less secure app access")
# PAYMENT_IMAP_HOST=imap.gmail.com
# PAYMENT_IMAP_PORT=993
# PAYMENT_IMAP_TLS=true
#
PAYMENT_IMAP_HOST=
PAYMENT_IMAP_PORT=993
PAYMENT_IMAP_USER=
PAYMENT_IMAP_PASS=
PAYMENT_IMAP_TLS=true

# How often to check for new payment emails (milliseconds)
# Default: 60000 (1 minute)
PAYMENT_POLL_INTERVAL_MS=60000

# Folder to move processed emails to (created automatically)
PAYMENT_PROCESSED_FOLDER=Processed