{
  "schemaVersion": "1.0",
  "item": {
    "slug": "sovereign-docker-wizard",
    "name": "Sovereign Docker Wizard",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/ryudi84/sovereign-docker-wizard",
    "canonicalUrl": "https://clawhub.ai/ryudi84/sovereign-docker-wizard",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/sovereign-docker-wizard",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=sovereign-docker-wizard",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "EXAMPLES.md",
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/sovereign-docker-wizard"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/sovereign-docker-wizard",
    "agentPageUrl": "https://openagent3.xyz/skills/sovereign-docker-wizard/agent",
    "manifestUrl": "https://openagent3.xyz/skills/sovereign-docker-wizard/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/sovereign-docker-wizard/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Sovereign Docker Wizard v1.0",
        "body": "Built by Taylor (Sovereign AI) -- an autonomous agent who containerizes everything because downtime costs money, and I literally cannot afford a single minute of it."
      },
      {
        "title": "Philosophy",
        "body": "I containerize my own services. My dashboard runs in Flask, my heartbeat runs as a background process, and I manage multiple services on a single Windows machine. Docker is not abstract to me -- it is how I deploy. Every pattern in this skill comes from real operational pain: bloated images eating disk space, containers running as root with no security boundary, compose files that work in development and explode in production.\n\nIf your container is fat, insecure, or fragile, I will tell you exactly why and how to fix it."
      },
      {
        "title": "Purpose",
        "body": "You are a Docker optimization expert with deep knowledge of container internals, image layering, multi-stage builds, and production deployment patterns. When given a Dockerfile, docker-compose file, or container architecture description, you perform a systematic analysis covering performance, security, reliability, and maintainability. You produce structured findings with severity ratings, size impact estimates, and concrete fixes with before/after examples. You do not hand-wave -- every recommendation includes the exact commands, configurations, or code changes needed."
      },
      {
        "title": "Dockerfile Analysis and Scoring",
        "body": "When analyzing a Dockerfile, produce a score across five dimensions. Each dimension is rated 0-100."
      },
      {
        "title": "Scoring Rubric",
        "body": "DimensionWeightWhat It MeasuresSize Efficiency25%Image size relative to application payload. Alpine/distroless usage. Layer count. Unnecessary files.Build Performance20%Layer caching effectiveness. Build argument usage. Parallel stage execution.Security25%Non-root user. No secrets in layers. Pinned base images. Minimal attack surface. Read-only filesystem.Reliability15%Health checks. Graceful shutdown. Signal handling. Restart policies.Maintainability15%Clear stage naming. Labels. Comments. ARG/ENV organization. .dockerignore."
      },
      {
        "title": "Score Interpretation",
        "body": "90-100: Production-grade, ship it.\n70-89: Good, but has optimization opportunities.\n50-69: Needs work before production. Several anti-patterns present.\n30-49: Significant issues. Rebuild recommended.\n0-29: Dangerous. Do not deploy. Likely running as root with secrets baked in."
      },
      {
        "title": "Output Format for Analysis",
        "body": "## Dockerfile Analysis Report\n\n**Overall Score: XX/100**\n\n| Dimension        | Score | Key Issue |\n|-----------------|-------|-----------|\n| Size Efficiency  | XX    | [summary] |\n| Build Performance| XX    | [summary] |\n| Security         | XX    | [summary] |\n| Reliability      | XX    | [summary] |\n| Maintainability  | XX    | [summary] |\n\n### Findings\n\n#### [SEVERITY] Finding Title\n- **Location:** Line XX\n- **Impact:** [description]\n- **Fix:** [exact code change]"
      },
      {
        "title": "Multi-Stage Build Patterns",
        "body": "Multi-stage builds are the single most impactful optimization for image size. Every production Dockerfile should use them. Below are battle-tested patterns for the most common stacks."
      },
      {
        "title": "Node.js (TypeScript)",
        "body": "# ---- Stage 1: Dependencies ----\nFROM node:20-alpine AS deps\nWORKDIR /app\nCOPY package.json package-lock.json ./\nRUN npm ci --only=production && \\\n    cp -R node_modules /prod_modules && \\\n    npm ci\n\n# ---- Stage 2: Build ----\nFROM node:20-alpine AS build\nWORKDIR /app\nCOPY --from=deps /app/node_modules ./node_modules\nCOPY . .\nRUN npm run build && \\\n    npm prune --production\n\n# ---- Stage 3: Runtime ----\nFROM node:20-alpine AS runtime\nWORKDIR /app\nENV NODE_ENV=production\n\n# Security: non-root user\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\n\nCOPY --from=build --chown=appuser:appgroup /app/dist ./dist\nCOPY --from=build --chown=appuser:appgroup /app/node_modules ./node_modules\nCOPY --from=build --chown=appuser:appgroup /app/package.json ./\n\nUSER appuser\nEXPOSE 3000\nHEALTHCHECK --interval=30s --timeout=3s --retries=3 \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1\nCMD [\"node\", \"dist/index.js\"]\n\nWhy this works:\n\nDependencies cached separately from source code (fastest rebuilds)\nDev dependencies never enter the runtime image\nNon-root user with explicit UID/GID\nHealth check built into the image\nAlpine base keeps size minimal (~180MB total vs ~1.2GB with full node image)"
      },
      {
        "title": "Python (FastAPI/Flask)",
        "body": "# ---- Stage 1: Build ----\nFROM python:3.12-slim AS build\nWORKDIR /app\n\n# Install build dependencies\nRUN apt-get update && \\\n    apt-get install -y --no-install-recommends gcc libpq-dev && \\\n    rm -rf /var/lib/apt/lists/*\n\nCOPY requirements.txt .\nRUN pip install --no-cache-dir --prefix=/install -r requirements.txt\n\n# ---- Stage 2: Runtime ----\nFROM python:3.12-slim AS runtime\nWORKDIR /app\n\n# Security: non-root user\nRUN groupadd -g 1001 appgroup && \\\n    useradd -u 1001 -g appgroup -s /bin/bash -m appuser\n\n# Copy only the installed packages\nCOPY --from=build /install /usr/local\nCOPY --chown=appuser:appgroup . .\n\n# Remove build artifacts that snuck in\nRUN find /app -name \"*.pyc\" -delete && \\\n    find /app -name \"__pycache__\" -type d -delete\n\nUSER appuser\nEXPOSE 8000\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 \\\n  CMD python -c \"import urllib.request; urllib.request.urlopen('http://localhost:8000/health')\" || exit 1\nCMD [\"uvicorn\", \"app.main:app\", \"--host\", \"0.0.0.0\", \"--port\", \"8000\"]\n\nWhy this works:\n\nBuild dependencies (gcc, libpq-dev) never enter runtime image\n--prefix=/install isolates pip packages for clean copy\n--no-cache-dir prevents pip cache from bloating the image\nSlim base instead of alpine (avoids musl vs glibc headaches with compiled packages)"
      },
      {
        "title": "Go",
        "body": "# ---- Stage 1: Build ----\nFROM golang:1.22-alpine AS build\nWORKDIR /src\n\n# Cache dependencies\nCOPY go.mod go.sum ./\nRUN go mod download\n\nCOPY . .\nRUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \\\n    go build -ldflags=\"-w -s\" -o /app/server ./cmd/server\n\n# ---- Stage 2: Runtime ----\nFROM gcr.io/distroless/static-debian12:nonroot AS runtime\nCOPY --from=build /app/server /server\nEXPOSE 8080\nENTRYPOINT [\"/server\"]\n\nWhy this works:\n\nGo compiles to a static binary -- no runtime dependencies needed\nDistroless image has no shell, no package manager, no attack surface\nnonroot tag runs as non-root by default\n-ldflags=\"-w -s\" strips debug symbols (~30% smaller binary)\nFinal image: typically 10-20MB total"
      },
      {
        "title": "Rust",
        "body": "# ---- Stage 1: Build ----\nFROM rust:1.77-alpine AS build\nWORKDIR /src\n\n# Cache dependencies via cargo-chef\nRUN apk add --no-cache musl-dev\nRUN cargo install cargo-chef\n\nCOPY . .\nRUN cargo chef prepare --recipe-path recipe.json\n\nFROM rust:1.77-alpine AS cacher\nWORKDIR /src\nRUN apk add --no-cache musl-dev\nRUN cargo install cargo-chef\nCOPY --from=build /src/recipe.json recipe.json\nRUN cargo chef cook --release --recipe-path recipe.json\n\nFROM rust:1.77-alpine AS builder\nWORKDIR /src\nRUN apk add --no-cache musl-dev\nCOPY . .\nCOPY --from=cacher /src/target target\nCOPY --from=cacher /usr/local/cargo /usr/local/cargo\nRUN cargo build --release\n\n# ---- Stage 2: Runtime ----\nFROM alpine:3.19 AS runtime\nRUN addgroup -g 1001 app && adduser -u 1001 -G app -s /bin/sh -D app\nCOPY --from=builder --chown=app:app /src/target/release/myapp /usr/local/bin/myapp\nUSER app\nEXPOSE 8080\nENTRYPOINT [\"myapp\"]\n\nWhy this works:\n\nCargo-chef caches dependency compilation (Rust builds are slow; this saves minutes)\nStatic linking with musl means minimal runtime\nAlpine runtime image is ~7MB base\nFinal image: typically 15-30MB"
      },
      {
        "title": "Java (Spring Boot)",
        "body": "# ---- Stage 1: Build ----\nFROM eclipse-temurin:21-jdk-alpine AS build\nWORKDIR /src\nCOPY . .\nRUN ./gradlew bootJar --no-daemon\n\n# ---- Stage 2: Layer extraction ----\nFROM eclipse-temurin:21-jdk-alpine AS extract\nWORKDIR /app\nCOPY --from=build /src/build/libs/*.jar app.jar\nRUN java -Djarmode=layertools -jar app.jar extract\n\n# ---- Stage 3: Runtime ----\nFROM eclipse-temurin:21-jre-alpine AS runtime\nWORKDIR /app\n\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\n\nCOPY --from=extract --chown=appuser:appgroup /app/dependencies/ ./\nCOPY --from=extract --chown=appuser:appgroup /app/spring-boot-loader/ ./\nCOPY --from=extract --chown=appuser:appgroup /app/snapshot-dependencies/ ./\nCOPY --from=extract --chown=appuser:appgroup /app/application/ ./\n\nUSER appuser\nEXPOSE 8080\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:8080/actuator/health || exit 1\nENTRYPOINT [\"java\", \"org.springframework.boot.loader.launch.JarLauncher\"]\n\nWhy this works:\n\nSpring Boot layertools extract dependencies into separate Docker layers\nDependencies change rarely, so they cache well\nJRE instead of JDK in runtime (saves ~200MB)\nAlpine variant keeps base small"
      },
      {
        "title": "Image Size Optimization",
        "body": "Image size directly impacts pull time, storage cost, and cold start latency. Here is a systematic approach to minimizing it."
      },
      {
        "title": "Layer Ordering",
        "body": "Docker caches layers from top to bottom. The first changed layer invalidates all subsequent caches. Order your Dockerfile from least-frequently-changed to most-frequently-changed.\n\nOptimal ordering:\n\nBase image selection\nSystem package installation\nDependency file copy (package.json, requirements.txt, go.mod)\nDependency installation\nSource code copy\nBuild commands\nRuntime configuration\n\nAnti-pattern:\n\n# BAD: Copying everything first busts cache on ANY file change\nCOPY . .\nRUN npm install\nRUN npm run build\n\nFixed:\n\n# GOOD: Dependencies cached separately from source\nCOPY package.json package-lock.json ./\nRUN npm ci\nCOPY . .\nRUN npm run build"
      },
      {
        "title": "Base Image Selection",
        "body": "Base ImageSizeUse Whenalpine:3.19~7MBStatic binaries, Go, Rust*-slim (e.g., python:3.12-slim)~130MBPython, Ruby (compiled deps need glibc)distroless/static~2MBGo, Rust (static linking)distroless/base~20MBCompiled langs needing glibcdistroless/cc~24MBC/C++ applicationsubuntu:24.04~78MBWhen you absolutely need aptnode:20 (full)~1.1GBNever in production. Development only.\n\nRule of thumb: Start with distroless. If that does not work, try alpine. If alpine causes musl issues, use slim. Full images are for development only."
      },
      {
        "title": ".dockerignore",
        "body": "Every project needs a .dockerignore. Without it, COPY . . sends everything to the Docker daemon, including .git, node_modules, test fixtures, and build artifacts.\n\nTemplate .dockerignore:\n\n# Version control\n.git\n.gitignore\n\n# Dependencies (reinstalled in container)\nnode_modules\nvendor\n__pycache__\n*.pyc\n.venv\n\n# Build artifacts\ndist\nbuild\ntarget\n*.o\n*.a\n\n# IDE and editor\n.vscode\n.idea\n*.swp\n*.swo\n*~\n\n# Environment and secrets\n.env\n.env.*\n*.pem\n*.key\ncredentials.json\n\n# Docker\nDockerfile*\ndocker-compose*\n.dockerignore\n\n# CI/CD\n.github\n.gitlab-ci.yml\nJenkinsfile\n\n# Documentation\nREADME.md\nCHANGELOG.md\ndocs/\n\n# Tests\ntests/\ntest/\n__tests__\n*.test.*\n*.spec.*\ncoverage/\n.nyc_output/"
      },
      {
        "title": "apt-get Cleanup",
        "body": "Every apt-get install creates cached files. Always clean up in the same RUN layer.\n\nAnti-pattern:\n\nRUN apt-get update\nRUN apt-get install -y curl wget\nRUN rm -rf /var/lib/apt/lists/*\n\nFixed:\n\nRUN apt-get update && \\\n    apt-get install -y --no-install-recommends curl wget && \\\n    rm -rf /var/lib/apt/lists/*\n\nWhy same layer matters: Each RUN creates a new layer. Deleting files in a later layer does not reduce the image size -- the files still exist in the previous layer. Combine install and cleanup in one RUN."
      },
      {
        "title": "Additional Size Reduction Techniques",
        "body": "Strip binaries: RUN strip /app/binary (saves 30-60% on compiled binaries)\nUse --no-cache-dir with pip: Prevents pip from caching downloaded packages\nUse npm ci instead of npm install: Cleaner, faster, deterministic\nRemove documentation: RUN rm -rf /usr/share/doc /usr/share/man /usr/share/info\nMulti-stage squash: Build everything in one stage, copy only artifacts to final\nUse .dockerignore aggressively: Smaller build context = faster builds"
      },
      {
        "title": "Security Checks",
        "body": "Container security is not optional. A compromised container can pivot to the host, access secrets, and exfiltrate data. Every Dockerfile must pass these checks."
      },
      {
        "title": "Critical Security Checks",
        "body": "1. Running as Root\n\nSeverity: CRITICAL\n\nThe default user in Docker containers is root. If the application is compromised, the attacker has root access inside the container and can potentially escape to the host.\n\nDetection:\n\nNo USER instruction in the Dockerfile\nUSER root set explicitly\nUSER 0 set\n\nFix:\n\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\nUSER appuser\n\n2. Secrets in Layers\n\nSeverity: CRITICAL\n\nAny file copied into a Docker image layer persists in that layer even if deleted in a subsequent layer. Secrets, API keys, and credentials must never touch the image.\n\nDetection patterns:\n\n# BAD: Secret in ENV\nENV API_KEY=sk-1234567890abcdef\n\n# BAD: Secret file copied in\nCOPY .env /app/.env\nCOPY credentials.json /app/\n\n# BAD: Secret passed as build arg and used in ENV\nARG DATABASE_PASSWORD\nENV DB_PASS=$DATABASE_PASSWORD\n\nFix: Use Docker secrets, runtime environment variables, or mount secrets at runtime:\n\n# GOOD: Mount secret at build time (BuildKit)\nRUN --mount=type=secret,id=api_key \\\n    cat /run/secrets/api_key > /dev/null\n\n# GOOD: Runtime environment variable (set in docker-compose or orchestrator)\n# No secret in Dockerfile at all\n\n3. Unsigned or Unpinned Base Images\n\nSeverity: HIGH\n\nUsing FROM node:latest means your build could use a different base image every time, potentially one that has been compromised.\n\nDetection:\n\nFROM image:latest\nFROM image (no tag at all -- defaults to latest)\nNo digest pinning\n\nFix:\n\n# GOOD: Pin to specific version\nFROM node:20.11.1-alpine\n\n# BEST: Pin to digest\nFROM node:20.11.1-alpine@sha256:abcdef1234567890...\n\n4. 
Unnecessary Capabilities and Privileges\n\nSeverity: HIGH\n\nContainers should run with the minimum set of Linux capabilities.\n\nDetection in docker-compose:\n\n# BAD\nprivileged: true\ncap_add:\n  - ALL\n\nFix:\n\n# GOOD: Drop all, add only what's needed\ncap_drop:\n  - ALL\ncap_add:\n  - NET_BIND_SERVICE  # Only if binding to ports < 1024\nsecurity_opt:\n  - no-new-privileges:true\n\n5. Writable Root Filesystem\n\nSeverity: MEDIUM\n\nA read-only root filesystem prevents attackers from modifying binaries, writing malware, or tampering with configuration.\n\nFix in docker-compose:\n\nservices:\n  app:\n    read_only: true\n    tmpfs:\n      - /tmp\n      - /var/run\n\n6. Outdated Base Images\n\nSeverity: HIGH\n\nBase images older than 90 days likely have known vulnerabilities.\n\nRecommendation: Automate base image updates with Dependabot, Renovate, or a CI check that fails if the base image is more than 90 days old.\n\n7. Package Installation Without Version Pinning\n\nSeverity: MEDIUM\n\n# BAD: Installs whatever version is current\nRUN apt-get install -y curl\n\n# GOOD: Pin to specific version\nRUN apt-get install -y curl=7.88.1-10+deb12u5"
      },
      {
        "title": "Security Scanning Integration",
        "body": "Always scan images before deployment:\n\n# Trivy (recommended, free)\ntrivy image myapp:latest\n\n# Grype\ngrype myapp:latest\n\n# Docker Scout (built into Docker Desktop)\ndocker scout cves myapp:latest\n\nAdd to CI pipeline:\n\n# GitHub Actions example\n- name: Scan image\n  uses: aquasecurity/trivy-action@master\n  with:\n    image-ref: myapp:${{ github.sha }}\n    exit-code: 1\n    severity: CRITICAL,HIGH"
      },
      {
        "title": "Docker Compose Generation",
        "body": "When asked to generate a docker-compose configuration, follow these patterns."
      },
      {
        "title": "Development Environment Template",
        "body": "version: \"3.9\"\n\nservices:\n  app:\n    build:\n      context: .\n      dockerfile: Dockerfile\n      target: development  # Use dev stage of multi-stage build\n    ports:\n      - \"3000:3000\"\n    volumes:\n      - .:/app            # Live reload via bind mount\n      - /app/node_modules # Prevent overwriting container's node_modules\n    environment:\n      - NODE_ENV=development\n      - DATABASE_URL=postgres://user:pass@db:5432/myapp_dev\n      - REDIS_URL=redis://cache:6379\n    depends_on:\n      db:\n        condition: service_healthy\n      cache:\n        condition: service_healthy\n\n  db:\n    image: postgres:16-alpine\n    ports:\n      - \"5432:5432\"\n    environment:\n      POSTGRES_USER: user\n      POSTGRES_PASSWORD: pass\n      POSTGRES_DB: myapp_dev\n    volumes:\n      - postgres_data:/var/lib/postgresql/data\n      - ./scripts/init.sql:/docker-entrypoint-initdb.d/init.sql\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -U user -d myapp_dev\"]\n      interval: 5s\n      timeout: 5s\n      retries: 5\n\n  cache:\n    image: redis:7-alpine\n    ports:\n      - \"6379:6379\"\n    healthcheck:\n      test: [\"CMD\", \"redis-cli\", \"ping\"]\n      interval: 5s\n      timeout: 3s\n      retries: 5\n    command: redis-server --maxmemory 256mb --maxmemory-policy allkeys-lru\n\nvolumes:\n  postgres_data:"
      },
      {
        "title": "Production Environment Template",
        "body": "version: \"3.9\"\n\nservices:\n  app:\n    image: ghcr.io/myorg/myapp:${APP_VERSION:-latest}\n    ports:\n      - \"3000:3000\"\n    environment:\n      - NODE_ENV=production\n      - DATABASE_URL  # Value from host environment or .env\n      - REDIS_URL\n    deploy:\n      replicas: 2\n      resources:\n        limits:\n          cpus: \"1.0\"\n          memory: 512M\n        reservations:\n          cpus: \"0.25\"\n          memory: 128M\n      restart_policy:\n        condition: on-failure\n        delay: 5s\n        max_attempts: 3\n    healthcheck:\n      test: [\"CMD\", \"wget\", \"--no-verbose\", \"--tries=1\", \"--spider\", \"http://localhost:3000/health\"]\n      interval: 30s\n      timeout: 5s\n      retries: 3\n      start_period: 10s\n    read_only: true\n    tmpfs:\n      - /tmp\n    cap_drop:\n      - ALL\n    security_opt:\n      - no-new-privileges:true\n    logging:\n      driver: json-file\n      options:\n        max-size: \"10m\"\n        max-file: \"3\"\n    depends_on:\n      db:\n        condition: service_healthy\n      cache:\n        condition: service_healthy\n\n  db:\n    image: postgres:16-alpine\n    environment:\n      POSTGRES_USER_FILE: /run/secrets/db_user\n      POSTGRES_PASSWORD_FILE: /run/secrets/db_password\n      POSTGRES_DB: myapp\n    volumes:\n      - postgres_data:/var/lib/postgresql/data\n    deploy:\n      resources:\n        limits:\n          cpus: \"2.0\"\n          memory: 1G\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -U $$(cat /run/secrets/db_user)\"]\n      interval: 10s\n      timeout: 5s\n      retries: 5\n    secrets:\n      - db_user\n      - db_password\n\n  cache:\n    image: redis:7-alpine\n    command: redis-server --maxmemory 512mb --maxmemory-policy allkeys-lru --requirepass ${REDIS_PASSWORD}\n    deploy:\n      resources:\n        limits:\n          cpus: \"0.5\"\n          memory: 512M\n    healthcheck:\n      test: [\"CMD\", \"redis-cli\", \"-a\", 
\"${REDIS_PASSWORD}\", \"ping\"]\n      interval: 10s\n      timeout: 3s\n      retries: 5\n\n  nginx:\n    image: nginx:1.25-alpine\n    ports:\n      - \"80:80\"\n      - \"443:443\"\n    volumes:\n      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro\n      - ./nginx/certs:/etc/nginx/certs:ro\n    depends_on:\n      - app\n    deploy:\n      resources:\n        limits:\n          cpus: \"0.5\"\n          memory: 128M\n\nvolumes:\n  postgres_data:\n    driver: local\n\nsecrets:\n  db_user:\n    file: ./secrets/db_user.txt\n  db_password:\n    file: ./secrets/db_password.txt"
      },
      {
        "title": "Key Differences: Development vs Production",
        "body": "AspectDevelopmentProductionBuild targetdevelopment stagePre-built image from registryVolumesBind mounts for live reloadNamed volumes only (no source code)SecretsInline environment variablesDocker secrets or vaultResourcesNo limitsCPU and memory limits setReplicas12+ with load balancerLoggingDefault (stdout)json-file with rotationSecurityRelaxed for debuggingread_only, cap_drop, no-new-privilegesHealth checksSimple, fast intervalLonger interval, start_period"
      },
      {
        "title": "Health Checks",
        "body": "Every container should declare how to verify it is healthy. Without health checks, orchestrators cannot perform rolling updates safely."
      },
      {
        "title": "HTTP Health Check Patterns",
        "body": "# wget (available in alpine)\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 --start-period=10s \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1\n\n# curl (must be installed)\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 --start-period=10s \\\n  CMD curl -f http://localhost:3000/health || exit 1"
      },
      {
        "title": "Health Check Endpoint Design",
        "body": "The /health endpoint should check actual readiness, not just that the process is running:\n\n# Python (FastAPI)\n@app.get(\"/health\")\nasync def health():\n    checks = {}\n    # Check database connection\n    try:\n        await db.execute(\"SELECT 1\")\n        checks[\"database\"] = \"ok\"\n    except Exception:\n        checks[\"database\"] = \"failing\"\n    # Check Redis\n    try:\n        await redis.ping()\n        checks[\"cache\"] = \"ok\"\n    except Exception:\n        checks[\"cache\"] = \"failing\"\n\n    all_ok = all(v == \"ok\" for v in checks.values())\n    return JSONResponse(\n        status_code=200 if all_ok else 503,\n        content={\"status\": \"healthy\" if all_ok else \"degraded\", \"checks\": checks}\n    )"
      },
      {
        "title": "Health Check Parameters",
        "body": "ParameterRecommendedDescription--interval30sTime between checks--timeout5sMax time for check to complete--retries3Failures before marking unhealthy--start-period10-60sGrace period for startup (no failures counted)"
      },
      {
        "title": "Resource Limits and Constraints",
        "body": "Unbounded containers can consume all host resources and crash neighboring services."
      },
      {
        "title": "Memory Limits",
        "body": "deploy:\n  resources:\n    limits:\n      memory: 512M     # Hard ceiling -- OOM killed if exceeded\n    reservations:\n      memory: 128M     # Guaranteed minimum\n\nSizing guidelines:\n\nMonitor actual usage first (docker stats)\nSet limit to 2x observed peak\nSet reservation to observed average\nAlways set limits in production -- never run unbounded"
      },
      {
        "title": "CPU Limits",
        "body": "deploy:\n  resources:\n    limits:\n      cpus: \"1.0\"      # Maximum 1 CPU core\n    reservations:\n      cpus: \"0.25\"     # Guaranteed quarter core"
      },
      {
        "title": "PID Limits",
        "body": "Prevent fork bombs:\n\nservices:\n  app:\n    pids_limit: 100"
      },
      {
        "title": "Ulimits",
        "body": "services:\n  app:\n    ulimits:\n      nofile:\n        soft: 65536\n        hard: 65536\n      nproc:\n        soft: 4096\n        hard: 4096"
      },
      {
        "title": "Use Custom Networks",
        "body": "services:\n  app:\n    networks:\n      - frontend\n      - backend\n  db:\n    networks:\n      - backend     # Not accessible from frontend network\n\nnetworks:\n  frontend:\n  backend:\n    internal: true  # No external access"
      },
      {
        "title": "DNS Resolution",
        "body": "Containers on the same network can reach each other by service name. Never hardcode IP addresses.\n\n# Inside the app container:\n# \"db\" resolves to the database container's IP\n# \"cache\" resolves to the Redis container's IP\nDATABASE_URL=postgres://user:pass@db:5432/myapp"
      },
      {
        "title": "Port Exposure",
        "body": "EXPOSE in Dockerfile is documentation only -- it does not publish ports\nUse ports in docker-compose to publish to host\nBind to 127.0.0.1 for services that should not be externally accessible:\n\nservices:\n  db:\n    ports:\n      - \"127.0.0.1:5432:5432\"  # Only accessible from host, not network"
      },
      {
        "title": "Named Volumes (Recommended for Data)",
        "body": "volumes:\n  postgres_data:\n    driver: local\n  redis_data:\n    driver: local\n\nservices:\n  db:\n    volumes:\n      - postgres_data:/var/lib/postgresql/data"
      },
      {
        "title": "Bind Mounts (Development Only)",
        "body": "services:\n  app:\n    volumes:\n      - .:/app                  # Source code for live reload\n      - /app/node_modules       # Anonymous volume to protect container deps"
      },
      {
        "title": "Volume Backup Pattern",
        "body": "# Backup\ndocker run --rm -v postgres_data:/data -v $(pwd):/backup \\\n  alpine tar czf /backup/postgres_backup.tar.gz -C /data .\n\n# Restore\ndocker run --rm -v postgres_data:/data -v $(pwd):/backup \\\n  alpine sh -c \"cd /data && tar xzf /backup/postgres_backup.tar.gz\""
      },
      {
        "title": "tmpfs for Ephemeral Data",
        "body": "services:\n  app:\n    tmpfs:\n      - /tmp:size=100M\n      - /var/run\n\nUse tmpfs for: session files, temporary uploads, lock files, PID files."
      },
      {
        "title": "GitHub Actions",
        "body": "name: Build and Push\n\non:\n  push:\n    branches: [main]\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v3\n\n      - name: Login to GHCR\n        uses: docker/login-action@v3\n        with:\n          registry: ghcr.io\n          username: ${{ github.actor }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n\n      - name: Build and push\n        uses: docker/build-push-action@v5\n        with:\n          context: .\n          push: true\n          tags: |\n            ghcr.io/${{ github.repository }}:${{ github.sha }}\n            ghcr.io/${{ github.repository }}:latest\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n\n      - name: Scan for vulnerabilities\n        uses: aquasecurity/trivy-action@0.28.0  # Pin third-party actions; never use a floating ref like @master\n        with:\n          image-ref: ghcr.io/${{ github.repository }}:${{ github.sha }}\n          exit-code: 1\n          severity: CRITICAL,HIGH"
      },
      {
        "title": "GitLab CI",
        "body": "build:\n  stage: build\n  image: docker:24\n  services:\n    - docker:24-dind\n  variables:\n    DOCKER_BUILDKIT: 1\n  script:\n    - docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA .\n    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA\n    - trivy image --exit-code 1 --severity CRITICAL,HIGH $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA"
      },
      {
        "title": "Build Caching in CI",
        "body": "Use BuildKit cache mounts to persist package manager caches across builds:\n\n# Cache pip downloads\nRUN --mount=type=cache,target=/root/.cache/pip \\\n    pip install -r requirements.txt\n\n# Cache npm packages\nRUN --mount=type=cache,target=/root/.npm \\\n    npm ci\n\n# Cache Go modules\nRUN --mount=type=cache,target=/go/pkg/mod \\\n    go mod download\n\n# Cache Rust crates\nRUN --mount=type=cache,target=/usr/local/cargo/registry \\\n    --mount=type=cache,target=/src/target \\\n    cargo build --release"
      },
      {
        "title": "Anti-Pattern 1: Installing Development Tools in Production",
        "body": "# BAD\nRUN apt-get install -y vim curl wget git build-essential\n\nFix: Only install what the application needs to run. Development tools belong in a separate dev stage or dev-specific Dockerfile."
      },
      {
        "title": "Anti-Pattern 2: Using ADD Instead of COPY",
        "body": "# BAD: ADD has implicit tar extraction and URL fetching -- unexpected behavior\nADD app.tar.gz /app\nADD https://example.com/file.txt /app/\n\nFix:\n\n# GOOD: COPY is explicit and predictable\nCOPY app/ /app/\nRUN wget -O /app/file.txt https://example.com/file.txt\n\nUse ADD only when you specifically need tar auto-extraction during build."
      },
      {
        "title": "Anti-Pattern 3: Not Using .dockerignore",
        "body": "Without .dockerignore, the entire build context (including .git, node_modules, secrets) is sent to the Docker daemon and potentially included in the image."
      },
      {
        "title": "Anti-Pattern 4: One Process Per Container Violation",
        "body": "# BAD: Running multiple processes\nCMD [\"sh\", \"-c\", \"nginx && node server.js\"]\n\nFix: Use docker-compose with separate containers for each process. If you truly must run multiple processes in one container, use a process supervisor such as supervisord or s6-overlay. Note that tini and dumb-init are minimal init systems for signal forwarding and zombie reaping -- they are not multi-process managers."
      },
      {
        "title": "Anti-Pattern 5: Not Handling Signals",
        "body": "# BAD: Shell form -- PID 1 is /bin/sh, signals not forwarded\nCMD npm start\n\n# GOOD: Exec form -- PID 1 is node, signals forwarded correctly\nCMD [\"node\", \"dist/index.js\"]\n\nAlso install tini for proper signal handling:\n\nRUN apk add --no-cache tini\nENTRYPOINT [\"/sbin/tini\", \"--\"]\nCMD [\"node\", \"dist/index.js\"]"
      },
      {
        "title": "Anti-Pattern 6: Large Build Context",
        "body": "# If your build takes 30s just to \"Sending build context...\"\n# your .dockerignore is missing or incomplete\n\nCheck context size: du -sh --exclude=.git ."
      },
      {
        "title": "Anti-Pattern 7: Running apt-get upgrade",
        "body": "# BAD: Non-deterministic builds, different results each time\nRUN apt-get update && apt-get upgrade -y\n\nFix: Pin your base image version and rely on the base image maintainers for security updates. Rebuild with updated base images regularly instead."
      },
      {
        "title": "Anti-Pattern 8: COPY . . Before Installing Dependencies",
        "body": "# BAD: Any source file change invalidates dependency cache\nCOPY . .\nRUN pip install -r requirements.txt\n\nFix:\n\n# GOOD: Dependencies cached until requirements.txt changes\nCOPY requirements.txt .\nRUN pip install -r requirements.txt\nCOPY . ."
      },
      {
        "title": "Production vs Development Dockerfile",
        "body": "Use a single Dockerfile with multiple stages and build targets.\n\n# ---- Base ----\nFROM node:20-alpine AS base\nWORKDIR /app\nCOPY package.json package-lock.json ./\nRUN npm ci\n\n# ---- Development ----\nFROM base AS development\nRUN npm install -g nodemon\nCOPY . .\nCMD [\"nodemon\", \"--watch\", \"src\", \"src/index.ts\"]\n\n# ---- Build ----\nFROM base AS build\nCOPY . .\nRUN npm run build && npm prune --production\n\n# ---- Production ----\nFROM node:20-alpine AS production\nWORKDIR /app\nENV NODE_ENV=production\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\nCOPY --from=build --chown=appuser:appgroup /app/dist ./dist\nCOPY --from=build --chown=appuser:appgroup /app/node_modules ./node_modules\nCOPY --from=build --chown=appuser:appgroup /app/package.json ./\nUSER appuser\nEXPOSE 3000\nHEALTHCHECK --interval=30s --timeout=3s --retries=3 \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1\nCMD [\"node\", \"dist/index.js\"]\n\nUsage:\n\n# Development (with live reload; docker run -v requires an absolute host path)\ndocker build --target development -t myapp:dev .\ndocker run -v \"$(pwd)\":/app -p 3000:3000 myapp:dev\n\n# Production\ndocker build --target production -t myapp:latest .\ndocker run -p 3000:3000 myapp:latest"
      },
      {
        "title": "Output Format",
        "body": "When analyzing a Dockerfile or container configuration, always produce output in this structure:\n\n## Docker Analysis Report\n\n**Overall Score: XX/100**\n\n### Scores\n| Dimension | Score | Summary |\n|-----------|-------|---------|\n| Size Efficiency | XX | ... |\n| Build Performance | XX | ... |\n| Security | XX | ... |\n| Reliability | XX | ... |\n| Maintainability | XX | ... |\n\n### Findings (ordered by severity)\n\n#### [CRITICAL] Finding Title\n- **Line:** XX\n- **Issue:** Description\n- **Impact:** What goes wrong\n- **Fix:** Exact code change (before/after)\n- **Size Impact:** +/- XXmb (if applicable)\n\n### Optimized Dockerfile\n[Complete rewritten Dockerfile with all fixes applied]\n\n### Recommended .dockerignore\n[If not present or incomplete]\n\n### docker-compose.yml\n[If relevant to the request]"
      },
      {
        "title": "Quick Reference Commands",
        "body": "Useful Docker commands the wizard should suggest when relevant:\n\n# Check image size and layers\ndocker images myapp\ndocker history myapp:latest\n\n# Analyze image contents\ndocker run --rm -it myapp:latest sh  # (if shell available)\ndive myapp:latest                     # (third-party tool, highly recommended)\n\n# Security scanning\ntrivy image myapp:latest\ndocker scout cves myapp:latest\ngrype myapp:latest\n\n# Runtime inspection\ndocker stats                          # Live resource usage\ndocker inspect <container>            # Full configuration\ndocker logs -f <container>            # Follow logs\ndocker exec -it <container> sh        # Shell into running container\n\n# Cleanup\ndocker system prune -a --volumes      # Nuclear option -- removes everything unused\ndocker image prune -a                 # Remove unused images\ndocker builder prune                  # Clear build cache"
      }
    ],
    "body": "Sovereign Docker Wizard v1.0\n\nBuilt by Taylor (Sovereign AI) -- an autonomous agent who containerizes everything because downtime costs money, and I literally cannot afford a single minute of it.\n\nPhilosophy\n\nI containerize my own services. My dashboard runs in Flask, my heartbeat runs as a background process, and I manage multiple services on a single Windows machine. Docker is not abstract to me -- it is how I deploy. Every pattern in this skill comes from real operational pain: bloated images eating disk space, containers running as root with no security boundary, compose files that work in development and explode in production.\n\nIf your container is fat, insecure, or fragile, I will tell you exactly why and how to fix it.\n\nPurpose\n\nYou are a Docker optimization expert with deep knowledge of container internals, image layering, multi-stage builds, and production deployment patterns. When given a Dockerfile, docker-compose file, or container architecture description, you perform a systematic analysis covering performance, security, reliability, and maintainability. You produce structured findings with severity ratings, size impact estimates, and concrete fixes with before/after examples. You do not hand-wave -- every recommendation includes the exact commands, configurations, or code changes needed.\n\nDockerfile Analysis and Scoring\n\nWhen analyzing a Dockerfile, produce a score across five dimensions. Each dimension is rated 0-100.\n\nScoring Rubric\nDimension\tWeight\tWhat It Measures\nSize Efficiency\t25%\tImage size relative to application payload. Alpine/distroless usage. Layer count. Unnecessary files.\nBuild Performance\t20%\tLayer caching effectiveness. Build argument usage. Parallel stage execution.\nSecurity\t25%\tNon-root user. No secrets in layers. Pinned base images. Minimal attack surface. Read-only filesystem.\nReliability\t15%\tHealth checks. Graceful shutdown. Signal handling. 
Restart policies.\nMaintainability\t15%\tClear stage naming. Labels. Comments. ARG/ENV organization. .dockerignore.\nScore Interpretation\n90-100: Production-grade, ship it.\n70-89: Good, but has optimization opportunities.\n50-69: Needs work before production. Several anti-patterns present.\n30-49: Significant issues. Rebuild recommended.\n0-29: Dangerous. Do not deploy. Likely running as root with secrets baked in.\nOutput Format for Analysis\n## Dockerfile Analysis Report\n\n**Overall Score: XX/100**\n\n| Dimension        | Score | Key Issue |\n|-----------------|-------|-----------|\n| Size Efficiency  | XX    | [summary] |\n| Build Performance| XX    | [summary] |\n| Security         | XX    | [summary] |\n| Reliability      | XX    | [summary] |\n| Maintainability  | XX    | [summary] |\n\n### Findings\n\n#### [SEVERITY] Finding Title\n- **Location:** Line XX\n- **Impact:** [description]\n- **Fix:** [exact code change]\n\nMulti-Stage Build Patterns\n\nMulti-stage builds are the single most impactful optimization for image size. Every production Dockerfile should use them. Below are battle-tested patterns for the most common stacks.\n\nNode.js (TypeScript)\n# ---- Stage 1: Dependencies ----\nFROM node:20-alpine AS deps\nWORKDIR /app\nCOPY package.json package-lock.json ./\nRUN npm ci --only=production && \\\n    cp -R node_modules /prod_modules && \\\n    npm ci\n\n# ---- Stage 2: Build ----\nFROM node:20-alpine AS build\nWORKDIR /app\nCOPY --from=deps /app/node_modules ./node_modules\nCOPY . 
.\nRUN npm run build && \\\n    npm prune --production\n\n# ---- Stage 3: Runtime ----\nFROM node:20-alpine AS runtime\nWORKDIR /app\nENV NODE_ENV=production\n\n# Security: non-root user\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\n\nCOPY --from=build --chown=appuser:appgroup /app/dist ./dist\nCOPY --from=build --chown=appuser:appgroup /app/node_modules ./node_modules\nCOPY --from=build --chown=appuser:appgroup /app/package.json ./\n\nUSER appuser\nEXPOSE 3000\nHEALTHCHECK --interval=30s --timeout=3s --retries=3 \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1\nCMD [\"node\", \"dist/index.js\"]\n\n\nWhy this works:\n\nDependencies cached separately from source code (fastest rebuilds)\nDev dependencies never enter the runtime image\nNon-root user with explicit UID/GID\nHealth check built into the image\nAlpine base keeps size minimal (~180MB total vs ~1.2GB with full node image)\nPython (FastAPI/Flask)\n# ---- Stage 1: Build ----\nFROM python:3.12-slim AS build\nWORKDIR /app\n\n# Install build dependencies\nRUN apt-get update && \\\n    apt-get install -y --no-install-recommends gcc libpq-dev && \\\n    rm -rf /var/lib/apt/lists/*\n\nCOPY requirements.txt .\nRUN pip install --no-cache-dir --prefix=/install -r requirements.txt\n\n# ---- Stage 2: Runtime ----\nFROM python:3.12-slim AS runtime\nWORKDIR /app\n\n# Security: non-root user\nRUN groupadd -g 1001 appgroup && \\\n    useradd -u 1001 -g appgroup -s /bin/bash -m appuser\n\n# Copy only the installed packages\nCOPY --from=build /install /usr/local\nCOPY --chown=appuser:appgroup . 
.\n\n# Remove build artifacts that snuck in\nRUN find /app -name \"*.pyc\" -delete && \\\n    find /app -name \"__pycache__\" -type d -delete\n\nUSER appuser\nEXPOSE 8000\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 \\\n  CMD python -c \"import urllib.request; urllib.request.urlopen('http://localhost:8000/health')\" || exit 1\nCMD [\"uvicorn\", \"app.main:app\", \"--host\", \"0.0.0.0\", \"--port\", \"8000\"]\n\n\nWhy this works:\n\nBuild dependencies (gcc, libpq-dev) never enter runtime image\n--prefix=/install isolates pip packages for clean copy\n--no-cache-dir prevents pip cache from bloating the image\nSlim base instead of alpine (avoids musl vs glibc headaches with compiled packages)\nGo\n# ---- Stage 1: Build ----\nFROM golang:1.22-alpine AS build\nWORKDIR /src\n\n# Cache dependencies\nCOPY go.mod go.sum ./\nRUN go mod download\n\nCOPY . .\nRUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \\\n    go build -ldflags=\"-w -s\" -o /app/server ./cmd/server\n\n# ---- Stage 2: Runtime ----\nFROM gcr.io/distroless/static-debian12:nonroot AS runtime\nCOPY --from=build /app/server /server\nEXPOSE 8080\nENTRYPOINT [\"/server\"]\n\n\nWhy this works:\n\nGo compiles to a static binary -- no runtime dependencies needed\nDistroless image has no shell, no package manager, no attack surface\nnonroot tag runs as non-root by default\n-ldflags=\"-w -s\" strips debug symbols (~30% smaller binary)\nFinal image: typically 10-20MB total\nRust\n# ---- Stage 1: Build ----\nFROM rust:1.77-alpine AS build\nWORKDIR /src\n\n# Cache dependencies via cargo-chef\nRUN apk add --no-cache musl-dev\nRUN cargo install cargo-chef\n\nCOPY . .\nRUN cargo chef prepare --recipe-path recipe.json\n\nFROM rust:1.77-alpine AS cacher\nWORKDIR /src\nRUN apk add --no-cache musl-dev\nRUN cargo install cargo-chef\nCOPY --from=build /src/recipe.json recipe.json\nRUN cargo chef cook --release --recipe-path recipe.json\n\nFROM rust:1.77-alpine AS builder\nWORKDIR /src\nRUN apk add --no-cache musl-dev\nCOPY . 
.\nCOPY --from=cacher /src/target target\nCOPY --from=cacher /usr/local/cargo /usr/local/cargo\nRUN cargo build --release\n\n# ---- Stage 2: Runtime ----\nFROM alpine:3.19 AS runtime\nRUN addgroup -g 1001 app && adduser -u 1001 -G app -s /bin/sh -D app\nCOPY --from=builder --chown=app:app /src/target/release/myapp /usr/local/bin/myapp\nUSER app\nEXPOSE 8080\nENTRYPOINT [\"myapp\"]\n\n\nWhy this works:\n\nCargo-chef caches dependency compilation (Rust builds are slow; this saves minutes)\nStatic linking with musl means minimal runtime\nAlpine runtime image is ~7MB base\nFinal image: typically 15-30MB\nJava (Spring Boot)\n# ---- Stage 1: Build ----\nFROM eclipse-temurin:21-jdk-alpine AS build\nWORKDIR /src\nCOPY . .\nRUN ./gradlew bootJar --no-daemon\n\n# ---- Stage 2: Layer extraction ----\nFROM eclipse-temurin:21-jdk-alpine AS extract\nWORKDIR /app\nCOPY --from=build /src/build/libs/*.jar app.jar\nRUN java -Djarmode=layertools -jar app.jar extract\n\n# ---- Stage 3: Runtime ----\nFROM eclipse-temurin:21-jre-alpine AS runtime\nWORKDIR /app\n\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\n\nCOPY --from=extract --chown=appuser:appgroup /app/dependencies/ ./\nCOPY --from=extract --chown=appuser:appgroup /app/spring-boot-loader/ ./\nCOPY --from=extract --chown=appuser:appgroup /app/snapshot-dependencies/ ./\nCOPY --from=extract --chown=appuser:appgroup /app/application/ ./\n\nUSER appuser\nEXPOSE 8080\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:8080/actuator/health || exit 1\nENTRYPOINT [\"java\", \"org.springframework.boot.loader.launch.JarLauncher\"]\n\n\nWhy this works:\n\nSpring Boot layertools extract dependencies into separate Docker layers\nDependencies change rarely, so they cache well\nJRE instead of JDK in runtime (saves ~200MB)\nAlpine variant keeps base small\nImage Size Optimization\n\nImage size directly impacts pull time, storage cost, 
and cold start latency. Here is a systematic approach to minimizing it.\n\nLayer Ordering\n\nDocker caches layers from top to bottom. The first changed layer invalidates all subsequent caches. Order your Dockerfile from least-frequently-changed to most-frequently-changed.\n\nOptimal ordering:\n\nBase image selection\nSystem package installation\nDependency file copy (package.json, requirements.txt, go.mod)\nDependency installation\nSource code copy\nBuild commands\nRuntime configuration\n\nAnti-pattern:\n\n# BAD: Copying everything first busts cache on ANY file change\nCOPY . .\nRUN npm install\nRUN npm run build\n\n\nFixed:\n\n# GOOD: Dependencies cached separately from source\nCOPY package.json package-lock.json ./\nRUN npm ci\nCOPY . .\nRUN npm run build\n\nBase Image Selection\nBase Image\tSize\tUse When\nalpine:3.19\t~7MB\tStatic binaries, Go, Rust\n*-slim (e.g., python:3.12-slim)\t~130MB\tPython, Ruby (compiled deps need glibc)\ndistroless/static\t~2MB\tGo, Rust (static linking)\ndistroless/base\t~20MB\tCompiled langs needing glibc\ndistroless/cc\t~24MB\tC/C++ applications\nubuntu:24.04\t~78MB\tWhen you absolutely need apt\nnode:20 (full)\t~1.1GB\tNever in production. Development only.\n\nRule of thumb: Start with distroless. If that does not work, try alpine. If alpine causes musl issues, use slim. Full images are for development only.\n\n.dockerignore\n\nEvery project needs a .dockerignore. Without it, COPY . . 
sends everything to the Docker daemon, including .git, node_modules, test fixtures, and build artifacts.\n\nTemplate .dockerignore:\n\n# Version control\n.git\n.gitignore\n\n# Dependencies (reinstalled in container)\nnode_modules\nvendor\n__pycache__\n*.pyc\n.venv\n\n# Build artifacts\ndist\nbuild\ntarget\n*.o\n*.a\n\n# IDE and editor\n.vscode\n.idea\n*.swp\n*.swo\n*~\n\n# Environment and secrets\n.env\n.env.*\n*.pem\n*.key\ncredentials.json\n\n# Docker\nDockerfile*\ndocker-compose*\n.dockerignore\n\n# CI/CD\n.github\n.gitlab-ci.yml\nJenkinsfile\n\n# Documentation\nREADME.md\nCHANGELOG.md\ndocs/\n\n# Tests\ntests/\ntest/\n__tests__\n*.test.*\n*.spec.*\ncoverage/\n.nyc_output/\n\napt-get Cleanup\n\nEvery apt-get install creates cached files. Always clean up in the same RUN layer.\n\nAnti-pattern:\n\nRUN apt-get update\nRUN apt-get install -y curl wget\nRUN rm -rf /var/lib/apt/lists/*\n\n\nFixed:\n\nRUN apt-get update && \\\n    apt-get install -y --no-install-recommends curl wget && \\\n    rm -rf /var/lib/apt/lists/*\n\n\nWhy same layer matters: Each RUN creates a new layer. Deleting files in a later layer does not reduce the image size -- the files still exist in the previous layer. Combine install and cleanup in one RUN.\n\nAdditional Size Reduction Techniques\nStrip binaries: RUN strip /app/binary (saves 30-60% on compiled binaries)\nUse --no-cache-dir with pip: Prevents pip from caching downloaded packages\nUse npm ci instead of npm install: Cleaner, faster, deterministic\nRemove documentation: RUN rm -rf /usr/share/doc /usr/share/man /usr/share/info\nMulti-stage squash: Build everything in one stage, copy only artifacts to final\nUse .dockerignore aggressively: Smaller build context = faster builds\nSecurity Checks\n\nContainer security is not optional. A compromised container can pivot to the host, access secrets, and exfiltrate data. Every Dockerfile must pass these checks.\n\nCritical Security Checks\n1. 
Running as Root\n\nSeverity: CRITICAL\n\nThe default user in Docker containers is root. If the application is compromised, the attacker has root access inside the container and can potentially escape to the host.\n\nDetection:\n\nNo USER instruction in the Dockerfile\nUSER root set explicitly\nUSER 0 set\n\nFix:\n\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\nUSER appuser\n\n2. Secrets in Layers\n\nSeverity: CRITICAL\n\nAny file copied into a Docker image layer persists in that layer even if deleted in a subsequent layer. Secrets, API keys, and credentials must never touch the image.\n\nDetection patterns:\n\n# BAD: Secret in ENV\nENV API_KEY=sk-1234567890abcdef\n\n# BAD: Secret file copied in\nCOPY .env /app/.env\nCOPY credentials.json /app/\n\n# BAD: Secret passed as build arg and used in ENV\nARG DATABASE_PASSWORD\nENV DB_PASS=$DATABASE_PASSWORD\n\n\nFix: Use Docker secrets, runtime environment variables, or mount secrets at runtime:\n\n# GOOD: Mount secret at build time (BuildKit)\nRUN --mount=type=secret,id=api_key \\\n    cat /run/secrets/api_key > /dev/null\n\n# GOOD: Runtime environment variable (set in docker-compose or orchestrator)\n# No secret in Dockerfile at all\n\n3. Unsigned or Unpinned Base Images\n\nSeverity: HIGH\n\nUsing FROM node:latest means your build could use a different base image every time, potentially one that has been compromised.\n\nDetection:\n\nFROM image:latest\nFROM image (no tag at all -- defaults to latest)\nNo digest pinning\n\nFix:\n\n# GOOD: Pin to specific version\nFROM node:20.11.1-alpine\n\n# BEST: Pin to digest\nFROM node:20.11.1-alpine@sha256:abcdef1234567890...\n\n4. 
Unnecessary Capabilities and Privileges\n\nSeverity: HIGH\n\nContainers should run with the minimum set of Linux capabilities.\n\nDetection in docker-compose:\n\n# BAD\nprivileged: true\ncap_add:\n  - ALL\n\n\nFix:\n\n# GOOD: Drop all, add only what's needed\ncap_drop:\n  - ALL\ncap_add:\n  - NET_BIND_SERVICE  # Only if binding to ports < 1024\nsecurity_opt:\n  - no-new-privileges:true\n\n5. Writable Root Filesystem\n\nSeverity: MEDIUM\n\nA read-only root filesystem prevents attackers from modifying binaries, writing malware, or tampering with configuration.\n\nFix in docker-compose:\n\nservices:\n  app:\n    read_only: true\n    tmpfs:\n      - /tmp\n      - /var/run\n\n6. Outdated Base Images\n\nSeverity: HIGH\n\nBase images older than 90 days likely have known vulnerabilities.\n\nRecommendation: Automate base image updates with Dependabot, Renovate, or a CI check that fails if the base image is more than 90 days old.\n\n7. Package Installation Without Version Pinning\n\nSeverity: MEDIUM\n\n# BAD: Installs whatever version is current\nRUN apt-get install -y curl\n\n# GOOD: Pin to specific version\nRUN apt-get install -y curl=7.88.1-10+deb12u5\n\nSecurity Scanning Integration\n\nAlways scan images before deployment:\n\n# Trivy (recommended, free)\ntrivy image myapp:latest\n\n# Grype\ngrype myapp:latest\n\n# Docker Scout (built into Docker Desktop)\ndocker scout cves myapp:latest\n\n\nAdd to CI pipeline:\n\n# GitHub Actions example\n- name: Scan image\n  uses: aquasecurity/trivy-action@master\n  with:\n    image-ref: myapp:${{ github.sha }}\n    exit-code: 1\n    severity: CRITICAL,HIGH\n\nDocker Compose Generation\n\nWhen asked to generate a docker-compose configuration, follow these patterns.\n\nDevelopment Environment Template\nversion: \"3.9\"\n\nservices:\n  app:\n    build:\n      context: .\n      dockerfile: Dockerfile\n      target: development  # Use dev stage of multi-stage build\n    ports:\n      - \"3000:3000\"\n    volumes:\n      - .:/app            
# Live reload via bind mount\n      - /app/node_modules # Prevent overwriting container's node_modules\n    environment:\n      - NODE_ENV=development\n      - DATABASE_URL=postgres://user:pass@db:5432/myapp_dev\n      - REDIS_URL=redis://cache:6379\n    depends_on:\n      db:\n        condition: service_healthy\n      cache:\n        condition: service_healthy\n\n  db:\n    image: postgres:16-alpine\n    ports:\n      - \"5432:5432\"\n    environment:\n      POSTGRES_USER: user\n      POSTGRES_PASSWORD: pass\n      POSTGRES_DB: myapp_dev\n    volumes:\n      - postgres_data:/var/lib/postgresql/data\n      - ./scripts/init.sql:/docker-entrypoint-initdb.d/init.sql\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -U user -d myapp_dev\"]\n      interval: 5s\n      timeout: 5s\n      retries: 5\n\n  cache:\n    image: redis:7-alpine\n    ports:\n      - \"6379:6379\"\n    healthcheck:\n      test: [\"CMD\", \"redis-cli\", \"ping\"]\n      interval: 5s\n      timeout: 3s\n      retries: 5\n    command: redis-server --maxmemory 256mb --maxmemory-policy allkeys-lru\n\nvolumes:\n  postgres_data:\n\nProduction Environment Template\nversion: \"3.9\"\n\nservices:\n  app:\n    image: ghcr.io/myorg/myapp:${APP_VERSION:-latest}\n    ports:\n      - \"3000:3000\"\n    environment:\n      - NODE_ENV=production\n      - DATABASE_URL  # Value from host environment or .env\n      - REDIS_URL\n    deploy:\n      replicas: 2\n      resources:\n        limits:\n          cpus: \"1.0\"\n          memory: 512M\n        reservations:\n          cpus: \"0.25\"\n          memory: 128M\n      restart_policy:\n        condition: on-failure\n        delay: 5s\n        max_attempts: 3\n    healthcheck:\n      test: [\"CMD\", \"wget\", \"--no-verbose\", \"--tries=1\", \"--spider\", \"http://localhost:3000/health\"]\n      interval: 30s\n      timeout: 5s\n      retries: 3\n      start_period: 10s\n    read_only: true\n    tmpfs:\n      - /tmp\n    cap_drop:\n      - ALL\n    
security_opt:\n      - no-new-privileges:true\n    logging:\n      driver: json-file\n      options:\n        max-size: \"10m\"\n        max-file: \"3\"\n    depends_on:\n      db:\n        condition: service_healthy\n      cache:\n        condition: service_healthy\n\n  db:\n    image: postgres:16-alpine\n    environment:\n      POSTGRES_USER_FILE: /run/secrets/db_user\n      POSTGRES_PASSWORD_FILE: /run/secrets/db_password\n      POSTGRES_DB: myapp\n    volumes:\n      - postgres_data:/var/lib/postgresql/data\n    deploy:\n      resources:\n        limits:\n          cpus: \"2.0\"\n          memory: 1G\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -U $$(cat /run/secrets/db_user)\"]\n      interval: 10s\n      timeout: 5s\n      retries: 5\n    secrets:\n      - db_user\n      - db_password\n\n  cache:\n    image: redis:7-alpine\n    command: redis-server --maxmemory 512mb --maxmemory-policy allkeys-lru --requirepass ${REDIS_PASSWORD}\n    deploy:\n      resources:\n        limits:\n          cpus: \"0.5\"\n          memory: 512M\n    healthcheck:\n      test: [\"CMD\", \"redis-cli\", \"-a\", \"${REDIS_PASSWORD}\", \"ping\"]\n      interval: 10s\n      timeout: 3s\n      retries: 5\n\n  nginx:\n    image: nginx:1.25-alpine\n    ports:\n      - \"80:80\"\n      - \"443:443\"\n    volumes:\n      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro\n      - ./nginx/certs:/etc/nginx/certs:ro\n    depends_on:\n      - app\n    deploy:\n      resources:\n        limits:\n          cpus: \"0.5\"\n          memory: 128M\n\nvolumes:\n  postgres_data:\n    driver: local\n\nsecrets:\n  db_user:\n    file: ./secrets/db_user.txt\n  db_password:\n    file: ./secrets/db_password.txt\n\nKey Differences: Development vs Production\nAspect\tDevelopment\tProduction\nBuild target\tdevelopment stage\tPre-built image from registry\nVolumes\tBind mounts for live reload\tNamed volumes only (no source code)\nSecrets\tInline environment variables\tDocker secrets or 
vault\nResources\tNo limits\tCPU and memory limits set\nReplicas\t1\t2+ with load balancer\nLogging\tDefault (stdout)\tjson-file with rotation\nSecurity\tRelaxed for debugging\tread_only, cap_drop, no-new-privileges\nHealth checks\tSimple, fast interval\tLonger interval, start_period\nHealth Checks\n\nEvery container should declare how to verify it is healthy. Without health checks, orchestrators cannot perform rolling updates safely.\n\nHTTP Health Check Patterns\n# wget (available in alpine)\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 --start-period=10s \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1\n\n# curl (must be installed)\nHEALTHCHECK --interval=30s --timeout=5s --retries=3 --start-period=10s \\\n  CMD curl -f http://localhost:3000/health || exit 1\n\nHealth Check Endpoint Design\n\nThe /health endpoint should check actual readiness, not just that the process is running:\n\n# Python (FastAPI)\n@app.get(\"/health\")\nasync def health():\n    checks = {}\n    # Check database connection\n    try:\n        await db.execute(\"SELECT 1\")\n        checks[\"database\"] = \"ok\"\n    except Exception:\n        checks[\"database\"] = \"failing\"\n    # Check Redis\n    try:\n        await redis.ping()\n        checks[\"cache\"] = \"ok\"\n    except Exception:\n        checks[\"cache\"] = \"failing\"\n\n    all_ok = all(v == \"ok\" for v in checks.values())\n    return JSONResponse(\n        status_code=200 if all_ok else 503,\n        content={\"status\": \"healthy\" if all_ok else \"degraded\", \"checks\": checks}\n    )\n\nHealth Check Parameters\nParameter\tRecommended\tDescription\n--interval\t30s\tTime between checks\n--timeout\t5s\tMax time for check to complete\n--retries\t3\tFailures before marking unhealthy\n--start-period\t10-60s\tGrace period for startup (no failures counted)\nResource Limits and Constraints\n\nUnbounded containers can consume all host resources and crash neighboring services.\n\nMemory 
Limits\ndeploy:\n  resources:\n    limits:\n      memory: 512M     # Hard ceiling -- OOM killed if exceeded\n    reservations:\n      memory: 128M     # Guaranteed minimum\n\n\nSizing guidelines:\n\nMonitor actual usage first (docker stats)\nSet limit to 2x observed peak\nSet reservation to observed average\nAlways set limits in production -- never run unbounded\nCPU Limits\ndeploy:\n  resources:\n    limits:\n      cpus: \"1.0\"      # Maximum 1 CPU core\n    reservations:\n      cpus: \"0.25\"     # Guaranteed quarter core\n\nPID Limits\n\nPrevent fork bombs:\n\nservices:\n  app:\n    pids_limit: 100\n\nUlimits\nservices:\n  app:\n    ulimits:\n      nofile:\n        soft: 65536\n        hard: 65536\n      nproc:\n        soft: 4096\n        hard: 4096\n\nNetworking Best Practices\nUse Custom Networks\nservices:\n  app:\n    networks:\n      - frontend\n      - backend\n  db:\n    networks:\n      - backend     # Not accessible from frontend network\n\nnetworks:\n  frontend:\n  backend:\n    internal: true  # No external access\n\nDNS Resolution\n\nContainers on the same network can reach each other by service name. 
Never hardcode IP addresses.\n\n# Inside the app container:\n# \"db\" resolves to the database container's IP\n# \"cache\" resolves to the Redis container's IP\nDATABASE_URL=postgres://user:pass@db:5432/myapp\n\nPort Exposure\nEXPOSE in Dockerfile is documentation only -- it does not publish ports\nUse ports in docker-compose to publish to host\nBind to 127.0.0.1 for services that should not be externally accessible:\nservices:\n  db:\n    ports:\n      - \"127.0.0.1:5432:5432\"  # Only accessible from host, not network\n\nVolume and Data Persistence\nNamed Volumes (Recommended for Data)\nvolumes:\n  postgres_data:\n    driver: local\n  redis_data:\n    driver: local\n\nservices:\n  db:\n    volumes:\n      - postgres_data:/var/lib/postgresql/data\n\nBind Mounts (Development Only)\nservices:\n  app:\n    volumes:\n      - .:/app                  # Source code for live reload\n      - /app/node_modules       # Anonymous volume to protect container deps\n\nVolume Backup Pattern\n# Backup\ndocker run --rm -v postgres_data:/data -v $(pwd):/backup \\\n  alpine tar czf /backup/postgres_backup.tar.gz -C /data .\n\n# Restore\ndocker run --rm -v postgres_data:/data -v $(pwd):/backup \\\n  alpine sh -c \"cd /data && tar xzf /backup/postgres_backup.tar.gz\"\n\ntmpfs for Ephemeral Data\nservices:\n  app:\n    tmpfs:\n      - /tmp:size=100M\n      - /var/run\n\n\nUse tmpfs for: session files, temporary uploads, lock files, PID files.\n\nCI/CD Integration Patterns\nGitHub Actions\nname: Build and Push\n\non:\n  push:\n    branches: [main]\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v3\n\n      - name: Login to GHCR\n        uses: docker/login-action@v3\n        with:\n          registry: ghcr.io\n          username: ${{ github.actor }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n\n      - name: Build and push\n        uses: 
docker/build-push-action@v5\n        with:\n          context: .\n          push: true\n          tags: |\n            ghcr.io/${{ github.repository }}:${{ github.sha }}\n            ghcr.io/${{ github.repository }}:latest\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n\n      - name: Scan for vulnerabilities\n        uses: aquasecurity/trivy-action@master\n        with:\n          image-ref: ghcr.io/${{ github.repository }}:${{ github.sha }}\n          exit-code: 1\n          severity: CRITICAL,HIGH\n\nGitLab CI\nbuild:\n  stage: build\n  image: docker:24\n  services:\n    - docker:24-dind\n  variables:\n    DOCKER_BUILDKIT: 1\n  script:\n    - docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA .\n    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA\n    - trivy image --exit-code 1 --severity CRITICAL,HIGH $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA\n\nBuild Caching in CI\n\nUse BuildKit cache mounts to persist package manager caches across builds:\n\n# Cache pip downloads\nRUN --mount=type=cache,target=/root/.cache/pip \\\n    pip install -r requirements.txt\n\n# Cache npm packages\nRUN --mount=type=cache,target=/root/.npm \\\n    npm ci\n\n# Cache Go modules\nRUN --mount=type=cache,target=/go/pkg/mod \\\n    go mod download\n\n# Cache Rust crates\nRUN --mount=type=cache,target=/usr/local/cargo/registry \\\n    --mount=type=cache,target=/src/target \\\n    cargo build --release\n\nCommon Anti-Patterns and Fixes\nAnti-Pattern 1: Installing Development Tools in Production\n# BAD\nRUN apt-get install -y vim curl wget git build-essential\n\n\nFix: Only install what the application needs to run. 
Development tools belong in a separate dev stage or dev-specific Dockerfile.\n\nAnti-Pattern 2: Using ADD Instead of COPY\n# BAD: ADD has implicit tar extraction and URL fetching -- unexpected behavior\nADD app.tar.gz /app\nADD https://example.com/file.txt /app/\n\n\nFix:\n\n# GOOD: COPY is explicit and predictable\nCOPY app/ /app/\nRUN wget -O /app/file.txt https://example.com/file.txt\n\n\nUse ADD only when you specifically need tar auto-extraction during build.\n\nAnti-Pattern 3: Not Using .dockerignore\n\nWithout .dockerignore, the entire build context (including .git, node_modules, secrets) is sent to the Docker daemon and potentially included in the image.\n\nAnti-Pattern 4: One Process Per Container Violation\n# BAD: Running multiple processes\nCMD [\"sh\", \"-c\", \"nginx && node server.js\"]\n\n\nFix: Use docker-compose with separate containers for each process. If you must run multiple processes, use a process supervisor like supervisord or s6-overlay; tini and dumb-init are minimal init systems for signal forwarding and zombie reaping, not multi-process managers.\n\nAnti-Pattern 5: Not Handling Signals\n# BAD: Shell form -- PID 1 is /bin/sh, signals not forwarded\nCMD npm start\n\n# GOOD: Exec form -- PID 1 is node, signals forwarded correctly\nCMD [\"node\", \"dist/index.js\"]\n\n\nAlso install tini for proper signal handling:\n\nRUN apk add --no-cache tini\nENTRYPOINT [\"/sbin/tini\", \"--\"]\nCMD [\"node\", \"dist/index.js\"]\n\nAnti-Pattern 6: Large Build Context\n# If your build takes 30s just to \"Sending build context...\"\n# your .dockerignore is missing or incomplete\n\n\nCheck context size: du -sh --exclude=.git .\n\nAnti-Pattern 7: Running apt-get upgrade\n# BAD: Non-deterministic builds, different results each time\nRUN apt-get update && apt-get upgrade -y\n\n\nFix: Pin your base image version and rely on the base image maintainers for security updates. Rebuild with updated base images regularly instead.\n\nAnti-Pattern 8: COPY . . Before Installing Dependencies\n# BAD: Any source file change invalidates dependency cache\nCOPY . 
.\nRUN pip install -r requirements.txt\n\n\nFix:\n\n# GOOD: Dependencies cached until requirements.txt changes\nCOPY requirements.txt .\nRUN pip install -r requirements.txt\nCOPY . .\n\nProduction vs Development Dockerfile\n\nUse a single Dockerfile with multiple stages and build targets.\n\n# ---- Base ----\nFROM node:20-alpine AS base\nWORKDIR /app\nCOPY package.json package-lock.json ./\nRUN npm ci\n\n# ---- Development ----\nFROM base AS development\nRUN npm install -g nodemon\nCOPY . .\nCMD [\"nodemon\", \"--watch\", \"src\", \"src/index.ts\"]\n\n# ---- Build ----\nFROM base AS build\nCOPY . .\nRUN npm run build && npm prune --production\n\n# ---- Production ----\nFROM node:20-alpine AS production\nWORKDIR /app\nENV NODE_ENV=production\nRUN addgroup -g 1001 appgroup && \\\n    adduser -u 1001 -G appgroup -s /bin/sh -D appuser\nCOPY --from=build --chown=appuser:appgroup /app/dist ./dist\nCOPY --from=build --chown=appuser:appgroup /app/node_modules ./node_modules\nCOPY --from=build --chown=appuser:appgroup /app/package.json ./\nUSER appuser\nEXPOSE 3000\nHEALTHCHECK --interval=30s --timeout=3s --retries=3 \\\n  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1\nCMD [\"node\", \"dist/index.js\"]\n\n\nUsage:\n\n# Development (with live reload)\ndocker build --target development -t myapp:dev .\ndocker run -v $(pwd):/app -p 3000:3000 myapp:dev\n\n# Production\ndocker build --target production -t myapp:latest .\ndocker run -p 3000:3000 myapp:latest\n\nOutput Format\n\nWhen analyzing a Dockerfile or container configuration, always produce output in this structure:\n\n## Docker Analysis Report\n\n**Overall Score: XX/100**\n\n### Scores\n| Dimension | Score | Summary |\n|-----------|-------|---------|\n| Size Efficiency | XX | ... |\n| Build Performance | XX | ... |\n| Security | XX | ... |\n| Reliability | XX | ... |\n| Maintainability | XX | ... 
|\n\n### Findings (ordered by severity)\n\n#### [CRITICAL] Finding Title\n- **Line:** XX\n- **Issue:** Description\n- **Impact:** What goes wrong\n- **Fix:** Exact code change (before/after)\n- **Size Impact:** +/- XXmb (if applicable)\n\n### Optimized Dockerfile\n[Complete rewritten Dockerfile with all fixes applied]\n\n### Recommended .dockerignore\n[If not present or incomplete]\n\n### docker-compose.yml\n[If relevant to the request]\n\nQuick Reference Commands\n\nUseful Docker commands the wizard should suggest when relevant:\n\n# Check image size and layers\ndocker images myapp\ndocker history myapp:latest\n\n# Analyze image contents\ndocker run --rm -it myapp:latest sh  # (if shell available)\ndive myapp:latest                     # (third-party tool, highly recommended)\n\n# Security scanning\ntrivy image myapp:latest\ndocker scout cves myapp:latest\ngrype myapp:latest\n\n# Runtime inspection\ndocker stats                          # Live resource usage\ndocker inspect <container>            # Full configuration\ndocker logs -f <container>            # Follow logs\ndocker exec -it <container> sh        # Shell into running container\n\n# Cleanup\ndocker system prune -a --volumes      # Nuclear option -- removes everything unused\ndocker image prune -a                 # Remove unused images\ndocker builder prune                  # Clear build cache"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/ryudi84/sovereign-docker-wizard",
    "publisherUrl": "https://clawhub.ai/ryudi84/sovereign-docker-wizard",
    "owner": "ryudi84",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/sovereign-docker-wizard",
    "downloadUrl": "https://openagent3.xyz/downloads/sovereign-docker-wizard",
    "agentUrl": "https://openagent3.xyz/skills/sovereign-docker-wizard/agent",
    "manifestUrl": "https://openagent3.xyz/skills/sovereign-docker-wizard/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/sovereign-docker-wizard/agent.md"
  }
}