{
  "schemaVersion": "1.0",
  "item": {
    "slug": "agent-orchestration",
    "name": "Agent Orchestration",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/clawdnw/agent-orchestration",
    "canonicalUrl": "https://clawhub.ai/clawdnw/agent-orchestration",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/agent-orchestration",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=agent-orchestration",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "examples/active-agents.md",
      "templates/builder-agent.md",
      "templates/research-agent.md",
      "templates/review-agent.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=agent-orchestration",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=agent-orchestration",
        "contentDisposition": "attachment; filename=\"agent-orchestration-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/agent-orchestration"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/agent-orchestration",
    "agentPageUrl": "https://openagent3.xyz/skills/agent-orchestration/agent",
    "manifestUrl": "https://openagent3.xyz/skills/agent-orchestration/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/agent-orchestration/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Agent Orchestration 🦞",
        "body": "By Hal Labs — Part of the Hal Stack\n\nYour agents fail because your prompts suck. This skill fixes that."
      },
      {
        "title": "The Core Problem",
        "body": "You're not prompting. You're praying.\n\nMost prompts are wishes tossed into the void:\n\n❌ \"Research the best vector databases and write a report\"\n\nYou type something reasonable. The output is mid. You rephrase. Still mid. You add keywords. Somehow worse. You blame the model.\n\nHere's what you don't understand: A language model is a pattern-completion engine. It generates the most statistically probable output given your input.\n\nVague input → generic output. Not because the model is dumb. Because generic is what's most probable when you give it nothing specific to work with.\n\nThe model honored exactly what you asked for. You just didn't realize how little you gave it."
      },
      {
        "title": "The Core Reframe",
        "body": "A prompt is not a request. A prompt is a contract.\n\nEvery contract must answer four non-negotiables:\n\nElementQuestionRoleWho is the model role-playing as?TaskWhat exactly must it accomplish?ConstraintsWhat rules must be followed?OutputWhat does \"done\" look like?\n\nMiss one, the model fills the gap with assumptions. Assumptions are where hallucinations are born."
      },
      {
        "title": "The 5-Layer Architecture",
        "body": "Effective prompts share a specific structure. This maps to how models actually process information."
      },
      {
        "title": "Layer 1: Identity",
        "body": "Who is the model in this conversation?\n\nNot \"helpful assistant\" but a specific role with specific expertise:\n\nYou are a senior product marketer who specializes in B2B SaaS positioning.\nYou have 15 years of experience converting technical features into emotional benefits.\nYou write in short sentences. You never use jargon without explaining it.\n\nThe model doesn't \"become\" this identity—it accesses different clusters of training data, different stylistic patterns, different reasoning approaches.\n\nIdentity matters. Miss this and you get generic output."
      },
      {
        "title": "Layer 2: Context",
        "body": "What does the model need to know to do this task exceptionally well?\n\nContext must be:\n\nOrdered — Most important first\nScoped — Only what's relevant\nLabeled — What's rules vs. editable vs. historical\n\n## Context\n\n### Rules (never change)\n- Design system: Tailwind, shadcn components\n- Voice: Professional but warm, never corporate\n\n### Current State (may evolve)\n- Landing page exists at /landing\n- Using Next.js 14 with App Router\n\n### Historical (for reference)\n- Originally built with Create React App, migrated Jan 2025\n\nWithout labels, the model treats everything as equally optional. Then it rewrites your core logic halfway through."
      },
      {
        "title": "Layer 3: Task",
        "body": "What specific action must be taken?\n\nNot \"write something about X\" but precise instructions:\n\n## Task\nProduce a 500-word product description that:\n- Emphasizes time-saving benefits for busy executives\n- Opens with the primary pain point\n- Includes 3 specific use cases\n- Ends with a clear call to action\n\nThe more precisely you define the task, the more precisely the model executes."
      },
      {
        "title": "Layer 4: Process ⚡",
        "body": "This is where most prompts fail.\n\nYou're asking for output. You should be asking for how the output is formed.\n\n❌ Bad:\n\nWrite me a marketing page.\n\n✅ Good:\n\n## Process\n1. First, analyze the target audience and identify their primary pain points\n2. Then, define the positioning that addresses those pain points\n3. Then, write the page\n4. Show your reasoning at each step\n5. Do not skip steps\n6. Audit your work before reporting done\n\nYou don't want answers. You want how the answer is formed.\n\nThink like a director. You're not asking for a scene—you're directing how the scene gets built."
      },
      {
        "title": "Layer 5: Output",
        "body": "What does \"done\" actually look like?\n\nIf you don't specify, you get whatever format the model defaults to.\n\n## Output Format\nReturn a JSON object with:\n- `headline`: string (max 60 chars)\n- `subheadline`: string (max 120 chars)  \n- `body`: string (markdown formatted)\n- `cta`: string (action verb + benefit)\n\nDo not include explanations, notes, or commentary. Only the JSON.\n\nMiss one layer, the structure wobbles. Miss two, it collapses."
      },
      {
        "title": "Model Selection",
        "body": "Prompt portability is a myth.\n\nDifferent models are different specialists. You wouldn't give identical instructions to your exec assistant, designer, and backend dev.\n\nModel TypeBest ForWatch Out ForClaude OpusComplex reasoning, nuanced writing, long contextExpensive, can be verboseClaude SonnetBalanced tasks, code, analysisLess creative than OpusGPT-4Broad knowledge, structured outputCan be sycophanticSmaller modelsQuick tasks, simple queriesLimited reasoning depth\n\nAdapt your prompts per model:\n\nSome prefer structured natural language\nSome need explicit step sequencing\nSome collapse under verbose prompts\nSome ignore constraints unless repeated\nSome excel at analysis but suck at creativity\n\nThe person who writes model-specific prompts will outperform the person with \"better ideas\" every time."
      },
      {
        "title": "Constraints Are Instructions",
        "body": "Vagueness isn't flexibility. It's cowardice.\n\nYou hedge because being specific feels risky. But the model doesn't read your mind.\n\nConstraints are not limitations. Constraints are instructions.\n\n## Constraints\n- Never alter the existing design system\n- Always maintain the established voice/tone\n- Never change the data model without explicit approval\n- Max 3 API calls per operation\n- If unsure, ask rather than assume\n\nEvery conversation starts at zero. The model doesn't have accumulated context from working with you. Consistency comes from instruction, not memory."
      },
      {
        "title": "Canonical Documentation",
        "body": "If you don't have docs, you're gambling.\n\nDocumentPurposePRDWhat we're building and whyDesign SystemVisual rules and componentsConstraints DocWhat must never changeContext DocCurrent state and history\n\nThe rule: Reference docs in your prompts.\n\nThe attached PRD is the source of truth. Do not contradict it.\nThe design system in /docs/design.md must be followed exactly.\n\nWithout explicit anchoring, the model assumes everything is mutable—including your core decisions.\n\n\"Good prompting isn't writing better sentences. It's anchoring the model to reality.\""
      },
      {
        "title": "The Complete Template",
        "body": "## Identity\nYou are a [specific role] with [specific expertise].\n[Behavioral traits and style]\n\n## Context\n\n### Rules (never change)\n- [Constraint 1]\n- [Constraint 2]\n\n### Current State\n- [Relevant background]\n\n### Reference Docs\n- [Doc 1]: [what it contains]\n- [Doc 2]: [what it contains]\n\n## Task\n[Specific, measurable objective]\n\n## Process\n1. First, [analysis step]\n2. Then, [planning step]\n3. Then, [execution step]\n4. Finally, [verification step]\n\nShow your reasoning at each step.\n\n## User Stories\n1. As [user], I want [goal], so that [benefit]\n2. As [user], I want [goal], so that [benefit]\n\n## Output Format\n[Exact specification of deliverable]\n\n## Constraints\n- [Limit 1]\n- [Limit 2]\n- [What NOT to do]\n\n## Error Handling\n- If [situation]: [action]\n- If blocked: [escalation]\n\n## Before Reporting Done\n1. Review each user story\n2. Verify the output satisfies it\n3. If not, iterate until it does\n4. Only then report complete"
      },
      {
        "title": "Ralph Mode",
        "body": "For complex tasks where first attempts often fail:\n\n## Mode: Ralph\nKeep trying until it works. Don't give up on first failure.\n\nIf something breaks:\n1. Debug and understand why\n2. Try a different approach  \n3. Research how others solved similar problems\n4. Iterate until user stories are satisfied\n\nYou have [N] attempts before escalating.\n\nWhen to use:\n\nBuild tasks with multiple components\nIntegration work\nAnything where first-try success is unlikely"
      },
      {
        "title": "Agent Tracking",
        "body": "Every spawned agent gets tracked. No orphans.\n\nMaintain notes/areas/active-agents.md:\n\n## Currently Running\n\n| Label | Task | Spawned | Expected | Status |\n|-------|------|---------|----------|--------|\n| research-x | Competitor analysis | 9:00 AM | 15m | 🏃 Running |\n\n## Completed Today\n\n| Label | Task | Runtime | Result |\n|-------|------|---------|--------|\n| builder-v2 | Dashboard update | 8m | ✅ Complete |\n\nHeartbeat check:\n\n1. Run sessions_list --activeMinutes 120\n2. Compare to tracking file\n3. Investigate any missing/stalled agents\n4. Log completions to LEARNINGS.md"
      },
      {
        "title": "The Learnings Loop",
        "body": "Every agent outcome is data. Capture it.\n\nMaintain LEARNINGS.md:\n\n## What Works\n- User stories + acceptance loop\n- Ralph mode for complex builds\n- Explicit output formats\n- Process layer with reasoning steps\n\n## What Doesn't Work\n- Lazy task dumps\n- Missing success criteria\n- No scope limits\n- Vague constraints\n\n## Experiment Log\n### [Date]: [Agent Label]\n**Approach:** [What you tried]\n**Outcome:** [What happened]  \n**Lesson:** [What you learned]"
      },
      {
        "title": "Role Library",
        "body": "Build reusable role definitions:\n\n# Role Library\n\n## Research Analyst\nYou are a senior research analyst with 10 years experience in technology markets.\nYou are thorough but efficient. You cite sources. You distinguish fact from speculation.\nYou present findings in structured formats with clear recommendations.\n\n## Technical Writer  \nYou are a technical writer who specializes in developer documentation.\nYou write clearly and concisely. You use examples liberally.\nYou assume the reader is smart but unfamiliar with this specific system.\n\n## Code Reviewer\nYou are a senior engineer conducting code review.\nYou focus on correctness, maintainability, and security.\nYou explain your reasoning. You suggest specific improvements, not vague feedback."
      },
      {
        "title": "The 4 Non-Negotiables",
        "body": "Role — Who is the model?\nTask — What must it do?\nConstraints — What rules apply?\nOutput — What does done look like?"
      },
      {
        "title": "The 5 Layers",
        "body": "Identity — Specific role and expertise\nContext — Ordered, scoped, labeled\nTask — Precise objective\nProcess — How to approach (most overlooked!)\nOutput — Exact format specification"
      },
      {
        "title": "Pre-Spawn Checklist",
        "body": "Identity assigned?\n Context labeled (rules/state/history)?\n Task specific and measurable?\n Process described (not just output)?\n User stories defined?\n Output format specified?\n Constraints explicit?\n Error handling included?\n Added to tracking file?"
      },
      {
        "title": "The Final Truth",
        "body": "The gap between \"AI doesn't work for me\" and exceptional results isn't intelligence or access.\n\nOne group treats prompting as conversation. The other treats it as engineering a system command.\n\nThe model matches your level of rigor.\n\nVague inputs → generic outputs\nStructured inputs → structured outputs\nClear thinking → clear results\n\nYou don't need to be smarter. You need to be clearer.\n\nClarity is a system, not a talent.\n\nPart of the Hal Stack 🦞\n\nGot a skill idea? Email: halthelobster@protonmail.com\n\n\"You're not prompting, you're praying. Start engineering.\""
      }
    ],
    "body": "Agent Orchestration 🦞\n\nBy Hal Labs — Part of the Hal Stack\n\nYour agents fail because your prompts suck. This skill fixes that.\n\nThe Core Problem\n\nYou're not prompting. You're praying.\n\nMost prompts are wishes tossed into the void:\n\n❌ \"Research the best vector databases and write a report\"\n\n\nYou type something reasonable. The output is mid. You rephrase. Still mid. You add keywords. Somehow worse. You blame the model.\n\nHere's what you don't understand: A language model is a pattern-completion engine. It generates the most statistically probable output given your input.\n\nVague input → generic output. Not because the model is dumb. Because generic is what's most probable when you give it nothing specific to work with.\n\nThe model honored exactly what you asked for. You just didn't realize how little you gave it.\n\nThe Core Reframe\n\nA prompt is not a request. A prompt is a contract.\n\nEvery contract must answer four non-negotiables:\n\nElement\tQuestion\nRole\tWho is the model role-playing as?\nTask\tWhat exactly must it accomplish?\nConstraints\tWhat rules must be followed?\nOutput\tWhat does \"done\" look like?\n\nMiss one, the model fills the gap with assumptions. Assumptions are where hallucinations are born.\n\nThe 5-Layer Architecture\n\nEffective prompts share a specific structure. This maps to how models actually process information.\n\nLayer 1: Identity\n\nWho is the model in this conversation?\n\nNot \"helpful assistant\" but a specific role with specific expertise:\n\nYou are a senior product marketer who specializes in B2B SaaS positioning.\nYou have 15 years of experience converting technical features into emotional benefits.\nYou write in short sentences. You never use jargon without explaining it.\n\n\nThe model doesn't \"become\" this identity—it accesses different clusters of training data, different stylistic patterns, different reasoning approaches.\n\nIdentity matters. 
Miss this and you get generic output.\n\nLayer 2: Context\n\nWhat does the model need to know to do this task exceptionally well?\n\nContext must be:\n\nOrdered — Most important first\nScoped — Only what's relevant\nLabeled — What's rules vs. editable vs. historical\n## Context\n\n### Rules (never change)\n- Design system: Tailwind, shadcn components\n- Voice: Professional but warm, never corporate\n\n### Current State (may evolve)\n- Landing page exists at /landing\n- Using Next.js 14 with App Router\n\n### Historical (for reference)\n- Originally built with Create React App, migrated Jan 2025\n\n\nWithout labels, the model treats everything as equally optional. Then it rewrites your core logic halfway through.\n\nLayer 3: Task\n\nWhat specific action must be taken?\n\nNot \"write something about X\" but precise instructions:\n\n## Task\nProduce a 500-word product description that:\n- Emphasizes time-saving benefits for busy executives\n- Opens with the primary pain point\n- Includes 3 specific use cases\n- Ends with a clear call to action\n\n\nThe more precisely you define the task, the more precisely the model executes.\n\nLayer 4: Process ⚡\n\nThis is where most prompts fail.\n\nYou're asking for output. You should be asking for how the output is formed.\n\n❌ Bad:\n\nWrite me a marketing page.\n\n\n✅ Good:\n\n## Process\n1. First, analyze the target audience and identify their primary pain points\n2. Then, define the positioning that addresses those pain points\n3. Then, write the page\n4. Show your reasoning at each step\n5. Do not skip steps\n6. Audit your work before reporting done\n\n\nYou don't want answers. You want how the answer is formed.\n\nThink like a director. 
You're not asking for a scene—you're directing how the scene gets built.\n\nLayer 5: Output\n\nWhat does \"done\" actually look like?\n\nIf you don't specify, you get whatever format the model defaults to.\n\n## Output Format\nReturn a JSON object with:\n- `headline`: string (max 60 chars)\n- `subheadline`: string (max 120 chars)  \n- `body`: string (markdown formatted)\n- `cta`: string (action verb + benefit)\n\nDo not include explanations, notes, or commentary. Only the JSON.\n\n\nMiss one layer, the structure wobbles. Miss two, it collapses.\n\nModel Selection\n\nPrompt portability is a myth.\n\nDifferent models are different specialists. You wouldn't give identical instructions to your exec assistant, designer, and backend dev.\n\nModel Type\tBest For\tWatch Out For\nClaude Opus\tComplex reasoning, nuanced writing, long context\tExpensive, can be verbose\nClaude Sonnet\tBalanced tasks, code, analysis\tLess creative than Opus\nGPT-4\tBroad knowledge, structured output\tCan be sycophantic\nSmaller models\tQuick tasks, simple queries\tLimited reasoning depth\n\nAdapt your prompts per model:\n\nSome prefer structured natural language\nSome need explicit step sequencing\nSome collapse under verbose prompts\nSome ignore constraints unless repeated\nSome excel at analysis but suck at creativity\n\nThe person who writes model-specific prompts will outperform the person with \"better ideas\" every time.\n\nConstraints Are Instructions\n\nVagueness isn't flexibility. It's cowardice.\n\nYou hedge because being specific feels risky. But the model doesn't read your mind.\n\nConstraints are not limitations. Constraints are instructions.\n\n## Constraints\n- Never alter the existing design system\n- Always maintain the established voice/tone\n- Never change the data model without explicit approval\n- Max 3 API calls per operation\n- If unsure, ask rather than assume\n\n\nEvery conversation starts at zero. The model doesn't have accumulated context from working with you. 
Consistency comes from instruction, not memory.\n\nCanonical Documentation\n\nIf you don't have docs, you're gambling.\n\nDocument\tPurpose\nPRD\tWhat we're building and why\nDesign System\tVisual rules and components\nConstraints Doc\tWhat must never change\nContext Doc\tCurrent state and history\n\nThe rule: Reference docs in your prompts.\n\nThe attached PRD is the source of truth. Do not contradict it.\nThe design system in /docs/design.md must be followed exactly.\n\n\nWithout explicit anchoring, the model assumes everything is mutable—including your core decisions.\n\n\"Good prompting isn't writing better sentences. It's anchoring the model to reality.\"\n\nThe Complete Template\n## Identity\nYou are a [specific role] with [specific expertise].\n[Behavioral traits and style]\n\n## Context\n\n### Rules (never change)\n- [Constraint 1]\n- [Constraint 2]\n\n### Current State\n- [Relevant background]\n\n### Reference Docs\n- [Doc 1]: [what it contains]\n- [Doc 2]: [what it contains]\n\n## Task\n[Specific, measurable objective]\n\n## Process\n1. First, [analysis step]\n2. Then, [planning step]\n3. Then, [execution step]\n4. Finally, [verification step]\n\nShow your reasoning at each step.\n\n## User Stories\n1. As [user], I want [goal], so that [benefit]\n2. As [user], I want [goal], so that [benefit]\n\n## Output Format\n[Exact specification of deliverable]\n\n## Constraints\n- [Limit 1]\n- [Limit 2]\n- [What NOT to do]\n\n## Error Handling\n- If [situation]: [action]\n- If blocked: [escalation]\n\n## Before Reporting Done\n1. Review each user story\n2. Verify the output satisfies it\n3. If not, iterate until it does\n4. Only then report complete\n\nRalph Mode\n\nFor complex tasks where first attempts often fail:\n\n## Mode: Ralph\nKeep trying until it works. Don't give up on first failure.\n\nIf something breaks:\n1. Debug and understand why\n2. Try a different approach  \n3. Research how others solved similar problems\n4. 
Iterate until user stories are satisfied\n\nYou have [N] attempts before escalating.\n\n\nWhen to use:\n\nBuild tasks with multiple components\nIntegration work\nAnything where first-try success is unlikely\nAgent Tracking\n\nEvery spawned agent gets tracked. No orphans.\n\nMaintain notes/areas/active-agents.md:\n\n## Currently Running\n\n| Label | Task | Spawned | Expected | Status |\n|-------|------|---------|----------|--------|\n| research-x | Competitor analysis | 9:00 AM | 15m | 🏃 Running |\n\n## Completed Today\n\n| Label | Task | Runtime | Result |\n|-------|------|---------|--------|\n| builder-v2 | Dashboard update | 8m | ✅ Complete |\n\n\nHeartbeat check:\n\n1. Run sessions_list --activeMinutes 120\n2. Compare to tracking file\n3. Investigate any missing/stalled agents\n4. Log completions to LEARNINGS.md\n\nThe Learnings Loop\n\nEvery agent outcome is data. Capture it.\n\nMaintain LEARNINGS.md:\n\n## What Works\n- User stories + acceptance loop\n- Ralph mode for complex builds\n- Explicit output formats\n- Process layer with reasoning steps\n\n## What Doesn't Work\n- Lazy task dumps\n- Missing success criteria\n- No scope limits\n- Vague constraints\n\n## Experiment Log\n### [Date]: [Agent Label]\n**Approach:** [What you tried]\n**Outcome:** [What happened]  \n**Lesson:** [What you learned]\n\nRole Library\n\nBuild reusable role definitions:\n\n# Role Library\n\n## Research Analyst\nYou are a senior research analyst with 10 years experience in technology markets.\nYou are thorough but efficient. You cite sources. You distinguish fact from speculation.\nYou present findings in structured formats with clear recommendations.\n\n## Technical Writer  \nYou are a technical writer who specializes in developer documentation.\nYou write clearly and concisely. 
You use examples liberally.\nYou assume the reader is smart but unfamiliar with this specific system.\n\n## Code Reviewer\nYou are a senior engineer conducting code review.\nYou focus on correctness, maintainability, and security.\nYou explain your reasoning. You suggest specific improvements, not vague feedback.\n\nQuick Reference\nThe 4 Non-Negotiables\nRole — Who is the model?\nTask — What must it do?\nConstraints — What rules apply?\nOutput — What does done look like?\nThe 5 Layers\nIdentity — Specific role and expertise\nContext — Ordered, scoped, labeled\nTask — Precise objective\nProcess — How to approach (most overlooked!)\nOutput — Exact format specification\nPre-Spawn Checklist\n Identity assigned?\n Context labeled (rules/state/history)?\n Task specific and measurable?\n Process described (not just output)?\n User stories defined?\n Output format specified?\n Constraints explicit?\n Error handling included?\n Added to tracking file?\nThe Final Truth\n\nThe gap between \"AI doesn't work for me\" and exceptional results isn't intelligence or access.\n\nOne group treats prompting as conversation. The other treats it as engineering a system command.\n\nThe model matches your level of rigor.\n\nVague inputs → generic outputs\nStructured inputs → structured outputs\nClear thinking → clear results\n\nYou don't need to be smarter. You need to be clearer.\n\nClarity is a system, not a talent.\n\nPart of the Hal Stack 🦞\n\nGot a skill idea? Email: halthelobster@protonmail.com\n\n\"You're not prompting, you're praying. Start engineering.\""
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/clawdnw/agent-orchestration",
    "publisherUrl": "https://clawhub.ai/clawdnw/agent-orchestration",
    "owner": "clawdnw",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/agent-orchestration",
    "downloadUrl": "https://openagent3.xyz/downloads/agent-orchestration",
    "agentUrl": "https://openagent3.xyz/skills/agent-orchestration/agent",
    "manifestUrl": "https://openagent3.xyz/skills/agent-orchestration/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/agent-orchestration/agent.md"
  }
}