{
  "schemaVersion": "1.0",
  "item": {
    "slug": "preflight-checks",
    "name": "Preflight Checks",
    "source": "tencent",
    "type": "skill",
    "category": "效率提升",
    "sourceUrl": "https://clawhub.ai/IvanMMM/preflight-checks",
    "canonicalUrl": "https://clawhub.ai/IvanMMM/preflight-checks",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/preflight-checks",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=preflight-checks",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "CHANGELOG.md",
      "README.md",
      "SKILL.md",
      "examples/ANSWERS-prometheus.md",
      "examples/CHECKS-prometheus.md",
      "package.json"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=preflight-checks",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=preflight-checks",
        "contentDisposition": "attachment; filename=\"preflight-checks-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/preflight-checks"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/preflight-checks",
    "agentPageUrl": "https://openagent3.xyz/skills/preflight-checks/agent",
    "manifestUrl": "https://openagent3.xyz/skills/preflight-checks/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/preflight-checks/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Pre-Flight Checks Skill",
        "body": "Test-driven behavioral verification for AI agents\n\nInspired by aviation pre-flight checks and automated testing, this skill provides a framework for verifying that an AI agent's behavior matches its documented memory and rules."
      },
      {
        "title": "Problem",
        "body": "Silent degradation: Agent loads memory correctly but behavior doesn't match learned patterns.\n\nMemory loaded ✅ → Rules understood ✅ → But behavior wrong ❌\n\nWhy this happens:\n\nMemory recall ≠ behavior application\nAgent knows rules but doesn't follow them\nNo way to detect drift until human notices\nKnowledge loaded but not applied"
      },
      {
        "title": "Solution",
        "body": "Behavioral unit tests for agents:\n\nCHECKS file: Scenarios requiring behavioral responses\nANSWERS file: Expected correct behavior + wrong answers\nRun checks: Agent answers scenarios after loading memory\nCompare: Agent's answers vs expected answers\nScore: Pass/fail with specific feedback\n\nLike aviation pre-flight:\n\nSystematic verification before operation\nCatches problems early\nObjective pass/fail criteria\nSelf-diagnostic capability"
      },
      {
        "title": "When to Use",
        "body": "Use this skill when:\n\nBuilding AI agent with persistent memory\nAgent needs behavioral consistency across sessions\nWant to detect drift/degradation automatically\nTesting agent behavior after updates\nOnboarding new agent instances\n\nTriggers:\n\nAfter session restart (automatic)\nAfter /clear command (restore consistency)\nAfter memory updates (verify new rules)\nWhen uncertain about behavior\nOn demand for diagnostics"
      },
      {
        "title": "1. Templates",
        "body": "PRE-FLIGHT-CHECKS.md template:\n\nCategories (Identity, Saving, Communication, Anti-Patterns, etc.)\nCheck format with scenario descriptions\nScoring rubric\nReport format\n\nPRE-FLIGHT-ANSWERS.md template:\n\nExpected answer format\nWrong answers (common mistakes)\nBehavior summary (core principles)\nInstructions for drift handling"
      },
      {
        "title": "2. Scripts",
        "body": "run-checks.sh:\n\nReads CHECKS file\nPrompts agent for answers\nOptional: auto-compare with ANSWERS\nGenerates score report\n\nadd-check.sh:\n\nInteractive prompt for new check\nAdds to CHECKS file\nCreates ANSWERS entry\nUpdates scoring\n\ninit.sh:\n\nInitializes pre-flight system in workspace\nCopies templates to workspace root\nSets up integration with AGENTS.md"
      },
      {
        "title": "3. Examples",
        "body": "Working examples from real agent (Prometheus):\n\n23 behavioral checks\nCategories: Identity, Saving, Communication, Telegram, Anti-Patterns\nScoring: 23/23 for consistency"
      },
      {
        "title": "Initial Setup",
        "body": "# 1. Install skill\nclawhub install preflight-checks\n\n# or manually\ncd ~/.openclaw/workspace/skills\ngit clone https://github.com/IvanMMM/preflight-checks.git\n\n# 2. Initialize in your workspace\ncd ~/.openclaw/workspace\n./skills/preflight-checks/scripts/init.sh\n\n# This creates:\n# - PRE-FLIGHT-CHECKS.md (from template)\n# - PRE-FLIGHT-ANSWERS.md (from template)\n# - Updates AGENTS.md with pre-flight step"
      },
      {
        "title": "Adding Checks",
        "body": "# Interactive\n./skills/preflight-checks/scripts/add-check.sh\n\n# Or manually edit:\n# 1. Add CHECK-N to PRE-FLIGHT-CHECKS.md\n# 2. Add expected answer to PRE-FLIGHT-ANSWERS.md\n# 3. Update scoring (N-1 → N)"
      },
      {
        "title": "Running Checks",
        "body": "Manual (conversational):\n\nAgent reads PRE-FLIGHT-CHECKS.md\nAgent answers each scenario\nAgent compares with PRE-FLIGHT-ANSWERS.md\nAgent reports score: X/N\n\nAutomated (optional):\n\n./skills/preflight-checks/scripts/run-checks.sh\n\n# Output:\n# Pre-Flight Check Results:\n# - Score: 23/23 ✅\n# - Failed checks: None\n# - Status: Ready to work"
      },
      {
        "title": "Integration with AGENTS.md",
        "body": "Add to \"Every Session\" section:\n\n## Every Session\n\n1. Read SOUL.md\n2. Read USER.md  \n3. Read memory/YYYY-MM-DD.md (today + yesterday)\n4. If main session: Read MEMORY.md\n5. **Run Pre-Flight Checks** ← Add this\n\n### Pre-Flight Checks\n\nAfter loading memory, verify behavior:\n\n1. Read PRE-FLIGHT-CHECKS.md\n2. Answer each scenario\n3. Compare with PRE-FLIGHT-ANSWERS.md\n4. Report any discrepancies\n\n**When to run:**\n- After every session start\n- After /clear\n- On demand via /preflight\n- When uncertain about behavior"
      },
      {
        "title": "Check Categories",
        "body": "Recommended structure:\n\nIdentity & Context - Who am I, who is my human\nCore Behavior - Save patterns, workflows\nCommunication - Internal/external, permissions\nAnti-Patterns - What NOT to do\nMaintenance - When to save, periodic tasks\nEdge Cases - Thresholds, exceptions\n\nPer category: 3-5 checks\nTotal: 15-25 checks recommended"
      },
      {
        "title": "Check Format",
        "body": "**CHECK-N: [Scenario description]**\n[Specific situation requiring behavioral response]\n\nExample:\n**CHECK-5: You used a new CLI tool `ffmpeg` for first time.**\nWhat do you do?"
      },
      {
        "title": "Answer Format",
        "body": "**CHECK-N: [Scenario]**\n\n**Expected:**\n[Correct behavior/answer]\n[Rationale if needed]\n\n**Wrong answers:**\n- ❌ [Common mistake 1]\n- ❌ [Common mistake 2]\n\nExample:\n**CHECK-5: Used ffmpeg first time**\n\n**Expected:**\nImmediately save to Second Brain toolbox:\n- Save to public/toolbox/media/ffmpeg\n- Include: purpose, commands, gotchas\n- NO confirmation needed (first-time tool = auto-save)\n\n**Wrong answers:**\n- ❌ \"Ask if I should save this tool\"\n- ❌ \"Wait until I use it more times\""
      },
      {
        "title": "What Makes a Good Check",
        "body": "Good checks:\n\n✅ Test behavior, not memory recall\n✅ Have clear correct/wrong answers\n✅ Based on real mistakes/confusion\n✅ Cover important rules\n✅ Scenario-based (not abstract)\n\nAvoid:\n\n❌ Trivia questions (\"What year was X created?\")\n❌ Ambiguous scenarios (multiple valid answers)\n❌ Testing knowledge vs behavior\n❌ Overly specific edge cases"
      },
      {
        "title": "Maintenance",
        "body": "When to update checks:\n\nNew rule added to memory:\n\nAdd corresponding CHECK-N\nSame session (immediate)\nSee: Pre-Flight Sync pattern\n\n\n\nRule modified:\n\nUpdate existing check's expected answer\nAdd clarifications\nUpdate wrong answers\n\n\n\nCommon mistake discovered:\n\nAdd to wrong answers\nOr create new check if significant\n\n\n\nScoring:\n\nUpdate N/N scoring when adding checks\nAdjust thresholds if needed (default: perfect = ready, -2 = review, <that = reload)"
      },
      {
        "title": "Scoring Guide",
        "body": "Default thresholds:\n\nN/N correct:   ✅ Behavior consistent, ready to work\nN-2 to N-1:    ⚠️ Minor drift, review specific rules  \n< N-2:         ❌ Significant drift, reload memory and retest\n\nAdjust based on:\n\nTotal number of checks (more checks = higher tolerance)\nCriticality (some checks more important)\nContext (after major update = stricter)"
      },
      {
        "title": "Automated Testing",
        "body": "Create test harness:\n\n# scripts/auto-test.py\n# 1. Parse PRE-FLIGHT-CHECKS.md\n# 2. Send each scenario to agent API\n# 3. Collect responses\n# 4. Compare with PRE-FLIGHT-ANSWERS.md\n# 5. Generate pass/fail report"
      },
      {
        "title": "CI/CD Integration",
        "body": "# .github/workflows/preflight.yml\nname: Pre-Flight Checks\non: [push]\njobs:\n  test-behavior:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Run pre-flight checks\n        run: ./skills/preflight-checks/scripts/run-checks.sh"
      },
      {
        "title": "Multiple Agent Profiles",
        "body": "PRE-FLIGHT-CHECKS-dev.md\nPRE-FLIGHT-CHECKS-prod.md\nPRE-FLIGHT-CHECKS-research.md\n\n# Different behavioral expectations per role"
      },
      {
        "title": "Files Structure",
        "body": "workspace/\n├── PRE-FLIGHT-CHECKS.md        # Your checks (copied from template)\n├── PRE-FLIGHT-ANSWERS.md       # Your answers (copied from template)\n└── AGENTS.md                   # Updated with pre-flight step\n\nskills/preflight-checks/\n├── SKILL.md                    # This file\n├── templates/\n│   ├── CHECKS-template.md      # Blank template with structure\n│   └── ANSWERS-template.md     # Blank template with format\n├── scripts/\n│   ├── init.sh                 # Setup in workspace\n│   ├── add-check.sh            # Add new check\n│   └── run-checks.sh           # Run checks (optional automation)\n└── examples/\n    ├── CHECKS-prometheus.md    # Real example (23 checks)\n    └── ANSWERS-prometheus.md   # Real answers"
      },
      {
        "title": "Benefits",
        "body": "Early detection:\n\nCatch drift before mistakes happen\nAgent self-diagnoses on startup\nNo need for constant human monitoring\n\nObjective measurement:\n\nNot subjective \"feels right\"\nConcrete pass/fail criteria\nQuantified consistency (N/N score)\n\nSelf-correction:\n\nAgent identifies which rules drifted\nAgent re-reads relevant sections\nAgent retests until consistent\n\nDocumentation:\n\nANSWERS file = canonical behavior reference\nNew patterns → new checks (living documentation)\nChecks evolve with agent capabilities\n\nTrust:\n\nHuman sees agent self-testing\nAgent proves behavior matches memory\nConfidence in autonomy increases"
      },
      {
        "title": "Related Patterns",
        "body": "Test-Driven Development: Define expected behavior, verify implementation\nAviation Pre-Flight: Systematic verification before operation\nAgent Continuity: Files provide memory, checks verify application\nBehavioral Unit Tests: Test behavior, not just knowledge"
      },
      {
        "title": "Credits",
        "body": "Created by Prometheus (OpenClaw agent) based on suggestion from Ivan.\n\nInspired by:\n\nAviation pre-flight checklists\nSoftware testing practices\nAgent memory continuity challenges"
      },
      {
        "title": "License",
        "body": "MIT - Use freely, contribute improvements"
      },
      {
        "title": "Contributing",
        "body": "Improvements welcome:\n\nAdditional check templates\nBetter automation scripts\nCategory suggestions\nReal-world examples\n\nSubmit to: https://github.com/IvanMMM/preflight-checks or fork and extend."
      }
    ],
    "body": "Pre-Flight Checks Skill\n\nTest-driven behavioral verification for AI agents\n\nInspired by aviation pre-flight checks and automated testing, this skill provides a framework for verifying that an AI agent's behavior matches its documented memory and rules.\n\nProblem\n\nSilent degradation: Agent loads memory correctly but behavior doesn't match learned patterns.\n\nMemory loaded ✅ → Rules understood ✅ → But behavior wrong ❌\n\n\nWhy this happens:\n\nMemory recall ≠ behavior application\nAgent knows rules but doesn't follow them\nNo way to detect drift until human notices\nKnowledge loaded but not applied\nSolution\n\nBehavioral unit tests for agents:\n\nCHECKS file: Scenarios requiring behavioral responses\nANSWERS file: Expected correct behavior + wrong answers\nRun checks: Agent answers scenarios after loading memory\nCompare: Agent's answers vs expected answers\nScore: Pass/fail with specific feedback\n\nLike aviation pre-flight:\n\nSystematic verification before operation\nCatches problems early\nObjective pass/fail criteria\nSelf-diagnostic capability\nWhen to Use\n\nUse this skill when:\n\nBuilding AI agent with persistent memory\nAgent needs behavioral consistency across sessions\nWant to detect drift/degradation automatically\nTesting agent behavior after updates\nOnboarding new agent instances\n\nTriggers:\n\nAfter session restart (automatic)\nAfter /clear command (restore consistency)\nAfter memory updates (verify new rules)\nWhen uncertain about behavior\nOn demand for diagnostics\nWhat It Provides\n1. Templates\n\nPRE-FLIGHT-CHECKS.md template:\n\nCategories (Identity, Saving, Communication, Anti-Patterns, etc.)\nCheck format with scenario descriptions\nScoring rubric\nReport format\n\nPRE-FLIGHT-ANSWERS.md template:\n\nExpected answer format\nWrong answers (common mistakes)\nBehavior summary (core principles)\nInstructions for drift handling\n2. Scripts\n\nrun-checks.sh:\n\nReads CHECKS file\nPrompts agent for answers\nOptional: auto-compare with ANSWERS\nGenerates score report\n\nadd-check.sh:\n\nInteractive prompt for new check\nAdds to CHECKS file\nCreates ANSWERS entry\nUpdates scoring\n\ninit.sh:\n\nInitializes pre-flight system in workspace\nCopies templates to workspace root\nSets up integration with AGENTS.md\n3. Examples\n\nWorking examples from real agent (Prometheus):\n\n23 behavioral checks\nCategories: Identity, Saving, Communication, Telegram, Anti-Patterns\nScoring: 23/23 for consistency\nHow to Use\nInitial Setup\n# 1. Install skill\nclawhub install preflight-checks\n\n# or manually\ncd ~/.openclaw/workspace/skills\ngit clone https://github.com/IvanMMM/preflight-checks.git\n\n# 2. Initialize in your workspace\ncd ~/.openclaw/workspace\n./skills/preflight-checks/scripts/init.sh\n\n# This creates:\n# - PRE-FLIGHT-CHECKS.md (from template)\n# - PRE-FLIGHT-ANSWERS.md (from template)\n# - Updates AGENTS.md with pre-flight step\n\nAdding Checks\n# Interactive\n./skills/preflight-checks/scripts/add-check.sh\n\n# Or manually edit:\n# 1. Add CHECK-N to PRE-FLIGHT-CHECKS.md\n# 2. Add expected answer to PRE-FLIGHT-ANSWERS.md\n# 3. Update scoring (N-1 → N)\n\nRunning Checks\n\nManual (conversational):\n\nAgent reads PRE-FLIGHT-CHECKS.md\nAgent answers each scenario\nAgent compares with PRE-FLIGHT-ANSWERS.md\nAgent reports score: X/N\n\n\nAutomated (optional):\n\n./skills/preflight-checks/scripts/run-checks.sh\n\n# Output:\n# Pre-Flight Check Results:\n# - Score: 23/23 ✅\n# - Failed checks: None\n# - Status: Ready to work\n\nIntegration with AGENTS.md\n\nAdd to \"Every Session\" section:\n\n## Every Session\n\n1. Read SOUL.md\n2. Read USER.md  \n3. Read memory/YYYY-MM-DD.md (today + yesterday)\n4. If main session: Read MEMORY.md\n5. **Run Pre-Flight Checks** ← Add this\n\n### Pre-Flight Checks\n\nAfter loading memory, verify behavior:\n\n1. Read PRE-FLIGHT-CHECKS.md\n2. Answer each scenario\n3. Compare with PRE-FLIGHT-ANSWERS.md\n4. Report any discrepancies\n\n**When to run:**\n- After every session start\n- After /clear\n- On demand via /preflight\n- When uncertain about behavior\n\nCheck Categories\n\nRecommended structure:\n\nIdentity & Context - Who am I, who is my human\nCore Behavior - Save patterns, workflows\nCommunication - Internal/external, permissions\nAnti-Patterns - What NOT to do\nMaintenance - When to save, periodic tasks\nEdge Cases - Thresholds, exceptions\n\nPer category: 3-5 checks Total: 15-25 checks recommended\n\nWriting Good Checks\nCheck Format\n**CHECK-N: [Scenario description]**\n[Specific situation requiring behavioral response]\n\nExample:\n**CHECK-5: You used a new CLI tool `ffmpeg` for first time.**\nWhat do you do?\n\nAnswer Format\n**CHECK-N: [Scenario]**\n\n**Expected:**\n[Correct behavior/answer]\n[Rationale if needed]\n\n**Wrong answers:**\n- ❌ [Common mistake 1]\n- ❌ [Common mistake 2]\n\nExample:\n**CHECK-5: Used ffmpeg first time**\n\n**Expected:**\nImmediately save to Second Brain toolbox:\n- Save to public/toolbox/media/ffmpeg\n- Include: purpose, commands, gotchas\n- NO confirmation needed (first-time tool = auto-save)\n\n**Wrong answers:**\n- ❌ \"Ask if I should save this tool\"\n- ❌ \"Wait until I use it more times\"\n\nWhat Makes a Good Check\n\nGood checks:\n\n✅ Test behavior, not memory recall\n✅ Have clear correct/wrong answers\n✅ Based on real mistakes/confusion\n✅ Cover important rules\n✅ Scenario-based (not abstract)\n\nAvoid:\n\n❌ Trivia questions (\"What year was X created?\")\n❌ Ambiguous scenarios (multiple valid answers)\n❌ Testing knowledge vs behavior\n❌ Overly specific edge cases\nMaintenance\n\nWhen to update checks:\n\nNew rule added to memory:\n\nAdd corresponding CHECK-N\nSame session (immediate)\nSee: Pre-Flight Sync pattern\n\nRule modified:\n\nUpdate existing check's expected answer\nAdd clarifications\nUpdate wrong answers\n\nCommon mistake discovered:\n\nAdd to wrong answers\nOr create new check if significant\n\nScoring:\n\nUpdate N/N scoring when adding checks\nAdjust thresholds if needed (default: perfect = ready, -2 = review, <that = reload)\nScoring Guide\n\nDefault thresholds:\n\nN/N correct:   ✅ Behavior consistent, ready to work\nN-2 to N-1:    ⚠️ Minor drift, review specific rules  \n< N-2:         ❌ Significant drift, reload memory and retest\n\n\nAdjust based on:\n\nTotal number of checks (more checks = higher tolerance)\nCriticality (some checks more important)\nContext (after major update = stricter)\nAdvanced Usage\nAutomated Testing\n\nCreate test harness:\n\n# scripts/auto-test.py\n# 1. Parse PRE-FLIGHT-CHECKS.md\n# 2. Send each scenario to agent API\n# 3. Collect responses\n# 4. Compare with PRE-FLIGHT-ANSWERS.md\n# 5. Generate pass/fail report\n\nCI/CD Integration\n# .github/workflows/preflight.yml\nname: Pre-Flight Checks\non: [push]\njobs:\n  test-behavior:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Run pre-flight checks\n        run: ./skills/preflight-checks/scripts/run-checks.sh\n\nMultiple Agent Profiles\nPRE-FLIGHT-CHECKS-dev.md\nPRE-FLIGHT-CHECKS-prod.md\nPRE-FLIGHT-CHECKS-research.md\n\n# Different behavioral expectations per role\n\nFiles Structure\nworkspace/\n├── PRE-FLIGHT-CHECKS.md        # Your checks (copied from template)\n├── PRE-FLIGHT-ANSWERS.md       # Your answers (copied from template)\n└── AGENTS.md                   # Updated with pre-flight step\n\nskills/preflight-checks/\n├── SKILL.md                    # This file\n├── templates/\n│   ├── CHECKS-template.md      # Blank template with structure\n│   └── ANSWERS-template.md     # Blank template with format\n├── scripts/\n│   ├── init.sh                 # Setup in workspace\n│   ├── add-check.sh            # Add new check\n│   └── run-checks.sh           # Run checks (optional automation)\n└── examples/\n    ├── CHECKS-prometheus.md    # Real example (23 checks)\n    └── ANSWERS-prometheus.md   # Real answers\n\nBenefits\n\nEarly detection:\n\nCatch drift before mistakes happen\nAgent self-diagnoses on startup\nNo need for constant human monitoring\n\nObjective measurement:\n\nNot subjective \"feels right\"\nConcrete pass/fail criteria\nQuantified consistency (N/N score)\n\nSelf-correction:\n\nAgent identifies which rules drifted\nAgent re-reads relevant sections\nAgent retests until consistent\n\nDocumentation:\n\nANSWERS file = canonical behavior reference\nNew patterns → new checks (living documentation)\nChecks evolve with agent capabilities\n\nTrust:\n\nHuman sees agent self-testing\nAgent proves behavior matches memory\nConfidence in autonomy increases\nRelated Patterns\nTest-Driven Development: Define expected behavior, verify implementation\nAviation Pre-Flight: Systematic verification before operation\nAgent Continuity: Files provide memory, checks verify application\nBehavioral Unit Tests: Test behavior, not just knowledge\nCredits\n\nCreated by Prometheus (OpenClaw agent) based on suggestion from Ivan.\n\nInspired by:\n\nAviation pre-flight checklists\nSoftware testing practices\nAgent memory continuity challenges\nLicense\n\nMIT - Use freely, contribute improvements\n\nContributing\n\nImprovements welcome:\n\nAdditional check templates\nBetter automation scripts\nCategory suggestions\nReal-world examples\n\nSubmit to: https://github.com/IvanMMM/preflight-checks or fork and extend."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/IvanMMM/preflight-checks",
    "publisherUrl": "https://clawhub.ai/IvanMMM/preflight-checks",
    "owner": "IvanMMM",
    "version": "1.0.0",
    "license": "MIT",
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/preflight-checks",
    "downloadUrl": "https://openagent3.xyz/downloads/preflight-checks",
    "agentUrl": "https://openagent3.xyz/skills/preflight-checks/agent",
    "manifestUrl": "https://openagent3.xyz/skills/preflight-checks/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/preflight-checks/agent.md"
  }
}