Files
TimeTracker/docker-compose.yml
T
Dries Peeters 52c7e9f02a feat(ai): gate helper by default; add uninstall docs and release 5.5.6
- Honor AI_ENABLED across session AI, REST v1, LLM service, templates, and
  context; add regression tests for the AI helper gate.
- Docker Compose: optional Ollama behind the ai profile; align env.example
  and example compose with safe defaults.
- Add UNINSTALL.md with a dedicated AI teardown section; cross-link from
  README, INSTALLATION, Getting Started, docs index, and Docker setup guide.
- Record 5.5.6 in CHANGELOG and sync version examples in BUILD_CONFIGURATION;
  bump setup.py to 5.5.6.
2026-05-14 06:46:59 +02:00

283 lines
10 KiB
YAML

# Default stack: app + Postgres + nginx (no local LLM). To bundle Ollama and enable the AI helper:
# docker compose --profile ai up -d
# Set AI_ENABLED=true in .env when using --profile ai (or use a hosted openai_compatible provider only).
services:
  # Certificate generator - runs once to create self-signed certs with SANs
  certgen:
    build:
      context: .
      dockerfile: docker/Dockerfile.certgen
    container_name: timetracker-certgen
    volumes:
      - ./nginx/ssl:/certs
    environment:
      - HOST_IP=${HOST_IP:-192.168.1.100}
    command: /generate-certs.sh
    # One-shot job: exits after writing certs, so never restart it.
    restart: "no"
# HTTPS reverse proxy (TLS terminates here)
nginx:
image: nginx:alpine
container_name: timetracker-nginx
ports:
- "${HTTP_PORT:-80}:80"
- "${HTTPS_PORT:-443}:443"
volumes:
- ./nginx/conf.d:/etc/nginx/conf.d:ro
- ./nginx/ssl:/etc/nginx/ssl:ro
depends_on:
certgen:
condition: service_completed_successfully
app:
condition: service_started
restart: unless-stopped
app:
build: .
container_name: timetracker-app
environment:
- TZ=${TZ:-Europe/Brussels}
- CURRENCY=${CURRENCY:-EUR}
- ROUNDING_MINUTES=${ROUNDING_MINUTES:-1}
- SINGLE_ACTIVE_TIMER=${SINGLE_ACTIVE_TIMER:-true}
- ALLOW_SELF_REGISTER=${ALLOW_SELF_REGISTER:-false}
- IDLE_TIMEOUT_MINUTES=${IDLE_TIMEOUT_MINUTES:-30}
- ADMIN_USERNAMES=${ADMIN_USERNAMES:-admin}
# IMPORTANT: Change SECRET_KEY in production! Used for sessions and CSRF tokens.
# Generate a secure key: python -c "import secrets; print(secrets.token_hex(32))"
#
# CSRF CONFIGURATION:
# - WTF_CSRF_SSL_STRICT: Set to 'false' for HTTP access (localhost or IP address)
# Set to 'true' only when using HTTPS in production
# - If accessing via IP address (e.g., 192.168.1.100), also set:
# SESSION_COOKIE_SECURE=false and CSRF_COOKIE_SECURE=false
#
# TROUBLESHOOTING: If forms fail with "CSRF token missing or invalid":
# 1. Verify SECRET_KEY is set and doesn't change between restarts
# 2. Check CSRF is enabled: WTF_CSRF_ENABLED=true
# 3. Ensure cookies are enabled in your browser
# 4. If behind a reverse proxy, ensure it forwards cookies correctly
# 5. Check the token hasn't expired (increase WTF_CSRF_TIME_LIMIT if needed)
# 6. If accessing via IP (not localhost): WTF_CSRF_SSL_STRICT=false
# For details: docs/CSRF_CONFIGURATION.md and docs/CSRF_IP_ACCESS_GUIDE.md
# NOTE: In production, the app refuses to start with an invalid/short SECRET_KEY.
# Provide it via your shell env or a .env file (recommended).
- SECRET_KEY=${SECRET_KEY:?Set SECRET_KEY to a random 32+ char string}
# Disable strict Referer check by default to avoid privacy/port issues
- WTF_CSRF_SSL_STRICT=${WTF_CSRF_SSL_STRICT:-true}
- WTF_CSRF_ENABLED=${WTF_CSRF_ENABLED:-true}
- WTF_CSRF_TIME_LIMIT=${WTF_CSRF_TIME_LIMIT:-3600}
- SESSION_COOKIE_SECURE=${SESSION_COOKIE_SECURE:-true}
- SESSION_COOKIE_SAMESITE=${SESSION_COOKIE_SAMESITE:-Lax}
- REMEMBER_COOKIE_SECURE=${REMEMBER_COOKIE_SECURE:-true}
- CSRF_COOKIE_SECURE=${CSRF_COOKIE_SECURE:-true}
- CSRF_COOKIE_HTTPONLY=${CSRF_COOKIE_HTTPONLY:-false}
- CSRF_COOKIE_SAMESITE=${CSRF_COOKIE_SAMESITE:-Lax}
- CSRF_COOKIE_NAME=${CSRF_COOKIE_NAME:-XSRF-TOKEN}
- PREFERRED_URL_SCHEME=${PREFERRED_URL_SCHEME:-https}
- WTF_CSRF_TRUSTED_ORIGINS=${WTF_CSRF_TRUSTED_ORIGINS:-https://localhost}
- DATABASE_URL=postgresql+psycopg2://timetracker:timetracker@db:5432/timetracker
- REDIS_URL=redis://:${REDIS_PASSWORD:-timetracker}@redis:6379/0
- REDIS_ENABLED=${REDIS_ENABLED:-false}
- LOG_FILE=/app/logs/timetracker.log
# Analytics & Monitoring (optional)
# See docs/analytics.md for configuration details
- SENTRY_DSN=${SENTRY_DSN:-}
- SENTRY_TRACES_RATE=${SENTRY_TRACES_RATE:-0.0}
- OTEL_EXPORTER_OTLP_ENDPOINT=${OTEL_EXPORTER_OTLP_ENDPOINT:-}
- OTEL_EXPORTER_OTLP_TOKEN=${OTEL_EXPORTER_OTLP_TOKEN:-}
- OTEL_DEBUG_LOGGING=${OTEL_DEBUG_LOGGING:-false}
- ENABLE_TRACING=${ENABLE_TRACING:-true}
- ENABLE_METRICS=${ENABLE_METRICS:-true}
- OTEL_METRICS_EXPORT_INTERVAL_MS=${OTEL_METRICS_EXPORT_INTERVAL_MS:-60000}
- ENABLE_TELEMETRY=${ENABLE_TELEMETRY:-false}
- TELE_SALT=${TELE_SALT:-8f4a7b2e9c1d6f3a5e8b4c7d2a9f6e3b1c8d5a7f2e9b4c6d3a8f5e1b7c4d9a2f}
# AI helper (off by default; use Compose profile "ai" for bundled Ollama or set AI_ENABLED=true for external API)
- AI_ENABLED=${AI_ENABLED:-false}
- AI_PROVIDER=${AI_PROVIDER:-ollama}
- AI_BASE_URL=${AI_BASE_URL:-http://ollama:11434}
- AI_MODEL=${AI_MODEL:-llama3.1}
- AI_API_KEY=${AI_API_KEY:-}
- AI_TIMEOUT_SECONDS=${AI_TIMEOUT_SECONDS:-60}
- AI_CONTEXT_LIMIT=${AI_CONTEXT_LIMIT:-40}
# Expose only internally; nginx publishes ports
ports: []
volumes:
- app_data:/data
- app_logs:/app/logs
- app_uploads:/app/app/static/uploads
depends_on:
db:
condition: service_healthy
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "-s", "-o", "/dev/null", "http://localhost:8080/_health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# Ollama - local LLM runtime for the TimeTracker AI helper (opt-in: docker compose --profile ai up -d)
# The app talks to it via the OpenAI-compatible endpoint at /v1/chat/completions.
# First boot pulls the model defined by AI_MODEL (default llama3.1, ~4.7 GB).
ollama:
profiles:
- ai
image: ollama/ollama:latest
container_name: timetracker-ollama
environment:
- OLLAMA_HOST=0.0.0.0:11434
- OLLAMA_KEEP_ALIVE=${OLLAMA_KEEP_ALIVE:-5m}
volumes:
- ollama_data:/root/.ollama
# Internal-only by default; uncomment to expose for host tools.
# ports:
# - "11434:11434"
healthcheck:
test: ["CMD-SHELL", "ollama list >/dev/null 2>&1 || exit 1"]
interval: 15s
timeout: 5s
retries: 10
start_period: 30s
restart: unless-stopped
# One-shot model puller; runs to completion on each `up` (no-op if model already cached).
ollama-init:
profiles:
- ai
image: ollama/ollama:latest
container_name: timetracker-ollama-init
depends_on:
ollama:
condition: service_healthy
environment:
- OLLAMA_HOST=http://ollama:11434
entrypoint: ["/bin/sh","-c"]
command:
- |
set -e
MODEL="${AI_MODEL:-llama3.1}"
echo "Pulling Ollama model: $$MODEL"
ollama pull "$$MODEL"
echo "Model ready: $$MODEL"
restart: "no"
db:
image: postgres:16-alpine
container_name: timetracker-db
environment:
- POSTGRES_DB=${POSTGRES_DB:-timetracker}
- POSTGRES_USER=${POSTGRES_USER:-timetracker}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-timetracker}
- TZ=${TZ:-Europe/Brussels}
volumes:
- db_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
restart: unless-stopped
  # Redis - Caching and session storage
  # Disabled - uncomment to re-enable
  # redis:
  #   image: redis:7-alpine
  #   container_name: timetracker-redis
  #   command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD:-timetracker}
  #   volumes:
  #     - redis_data:/data
  #   ports:
  #     - "6379:6379"
  #   healthcheck:
  #     test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
  #     interval: 10s
  #     timeout: 3s
  #     retries: 5
  #   restart: unless-stopped
  # Analytics & Monitoring Services
  # All monitoring services below are disabled by default
  # See docs/analytics.md and ANALYTICS_QUICK_START.md for details
  # Prometheus - Metrics collection and storage
  # Disabled - uncomment to re-enable
  # prometheus:
  #   image: prom/prometheus:latest
  #   container_name: timetracker-prometheus
  #   volumes:
  #     - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
  #     - prometheus_data:/prometheus
  #   command:
  #     - '--config.file=/etc/prometheus/prometheus.yml'
  #     - '--storage.tsdb.path=/prometheus'
  #     - '--storage.tsdb.retention.time=30d'
  #   ports:
  #     - "9090:9090"
  #   restart: unless-stopped
  # Grafana - Metrics visualization and dashboards
  # Disabled - uncomment to re-enable
  # grafana:
  #   image: grafana/grafana:latest
  #   container_name: timetracker-grafana
  #   environment:
  #     - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
  #     - GF_USERS_ALLOW_SIGN_UP=false
  #     - GF_SERVER_ROOT_URL=${GF_SERVER_ROOT_URL:-http://localhost:3000}
  #   volumes:
  #     - grafana_data:/var/lib/grafana
  #     - ./grafana/provisioning:/etc/grafana/provisioning
  #   ports:
  #     - "3000:3000"
  #   depends_on:
  #     - prometheus
  #   restart: unless-stopped
  # Loki - Log aggregation
  # Disabled - uncomment to re-enable
  # loki:
  #   image: grafana/loki:latest
  #   container_name: timetracker-loki
  #   volumes:
  #     - ./loki/loki-config.yml:/etc/loki/local-config.yaml
  #     - loki_data:/loki
  #   ports:
  #     - "3100:3100"
  #   command: -config.file=/etc/loki/local-config.yaml
  #   restart: unless-stopped
  # Promtail - Log shipping to Loki
  # Disabled - uncomment to re-enable
  # promtail:
  #   image: grafana/promtail:latest
  #   container_name: timetracker-promtail
  #   volumes:
  #     - ./logs:/var/log/timetracker:ro
  #     - ./promtail/promtail-config.yml:/etc/promtail/config.yml
  #   command: -config.file=/etc/promtail/config.yml
  #   depends_on:
  #     - loki
  #   restart: unless-stopped
# Named volumes (some are only used by the optional/commented-out services;
# declaring them unconditionally is harmless).
volumes:
  app_data:
    driver: local
  app_logs:
    driver: local
  app_uploads:
    driver: local
  db_data:
    driver: local
  prometheus_data:
    driver: local
  grafana_data:
    driver: local
  loki_data:
    driver: local
  redis_data:
    driver: local
  ollama_data:
    driver: local