{
  "schemaVersion": "1.0",
  "item": {
    "slug": "multi-model-critique",
    "name": "Multi Model Critique",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/prairiedoggg/multi-model-critique",
    "canonicalUrl": "https://clawhub.ai/prairiedoggg/multi-model-critique",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/multi-model-critique",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=multi-model-critique",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "references/orchestration-template.md",
      "references/output-schema.md",
      "references/prompt-templates.md",
      "scripts/build_round_prompts.py",
      "scripts/run_orchestration.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "multi-model-critique",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-02T15:46:53.878Z",
      "expiresAt": "2026-05-09T15:46:53.878Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=multi-model-critique",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=multi-model-critique",
        "contentDisposition": "attachment; filename=\"multi-model-critique-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "multi-model-critique"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/multi-model-critique"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/multi-model-critique",
    "agentPageUrl": "https://openagent3.xyz/skills/multi-model-critique/agent",
    "manifestUrl": "https://openagent3.xyz/skills/multi-model-critique/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/multi-model-critique/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Overview",
        "body": "Use this skill only for complex tasks. Route multiple models through the same 4-step loop (Plan -> Execute -> Review -> Improve), then run cross-critique and synthesis to produce a higher-quality final answer than any single-model draft."
      },
      {
        "title": "Trigger rule",
        "body": "Enable this skill only when the request explicitly sets complex to true (or equivalent wording such as “this is complex/deep”).\n\nIf complex is false, skip this skill and respond with normal single-model behavior."
      },
      {
        "title": "Inputs",
        "body": "Collect or confirm these inputs before execution:\n\ncomplex: boolean flag (must be true)\nquestion: user request\nmodels: list of ACP agentId values (typically 3)\nconstraints: output format, language, length, deadlines, forbidden assumptions\nops: optional runtime controls (timeoutSec, maxRetries, maxRounds, budgetUsd)"
      },
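      {
        "title": "Example inputs (illustrative)",
        "body": "A minimal sketch of the inputs as a Python dict, assuming three ACP agents; the agentId values, sample question, and constraint keys are placeholders, not values shipped with this package:\n\ninputs = {\n    \"complex\": True,\n    \"question\": \"Compare two storage engines for our workload\",\n    \"models\": [\"agent-a\", \"agent-b\", \"agent-c\"],  # placeholder ACP agentId values\n    \"constraints\": {\"format\": \"markdown\", \"language\": \"en\"},\n    \"ops\": {\"timeoutSec\": 180, \"maxRetries\": 1, \"maxRounds\": 4, \"budgetUsd\": 2.0},\n}\n\nThe ops values mirror the recommended runtime defaults listed at the end of this document."
      },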
      {
        "title": "File map (what each file does)",
        "body": "SKILL.md (this file): orchestration policy, trigger conditions, and execution sequence.\nreferences/prompt-templates.md: reusable prompts for draft, critique, revision, and final synthesis (includes scoring rubric usage).\nreferences/orchestration-template.md: practical OpenClaw orchestration flow using sessions_spawn, sessions_send, and sessions_history.\nreferences/output-schema.md: machine-parseable JSON output schema for final result and per-model scoring.\nscripts/build_round_prompts.py: utility to generate per-model prompt files for repeated runs.\nscripts/run_orchestration.py: local helper that builds a run plan JSON (model mapping, round prompts, runtime settings)."
      },
      {
        "title": "Step 1) Parallel draft round",
        "body": "Spawn one ACP session per model with the same task and constraints.\n\nPer-model requirements:\n\nFollow the exact internal sequence: Plan -> Execute -> Review -> Improve\nPrint all four sections explicitly\nEnd with Draft Answer\n\nUse sessions_spawn with runtime:\"acp\" and explicit agentId."
      },
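      {
        "title": "Step 1 sketch (illustrative)",
        "body": "A minimal sketch of the fan-out, building one sessions_spawn request per model as plain data; the dict shape is an assumption for illustration, not the real tool-call syntax (see references/orchestration-template.md for the concrete flow), and it reuses the inputs dict from the example above:\n\nDRAFT_PROMPT = \"Follow Plan -> Execute -> Review -> Improve, print all four sections explicitly, and end with Draft Answer.\"\n\n# one spawn request per model, all with the same task and constraints\nspawn_requests = [\n    {\"tool\": \"sessions_spawn\", \"runtime\": \"acp\", \"agentId\": agent_id,\n     \"task\": f\"{DRAFT_PROMPT} Question: {inputs['question']}\"}\n    for agent_id in inputs[\"models\"]\n]"
      },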
      {
        "title": "Step 2) Cross-critique round",
        "body": "Share peer Draft Answer outputs with each model and require structured critique:\n\nStrengths\nWeaknesses\nMissing assumptions/data\nHallucination and confidence risks\nConcrete fix suggestions\n\nAlso require ranking of peer drafts with rationale."
      },
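      {
        "title": "Step 2 sketch (illustrative)",
        "body": "A minimal sketch of the critique fan-out, assuming a hypothetical drafts dict keyed by agentId that holds each model's Draft Answer collected after Step 1 (for example via sessions_history):\n\nCRITIQUE_POINTS = (\n    \"Cover: strengths; weaknesses; missing assumptions/data; hallucination and \"\n    \"confidence risks; concrete fix suggestions. Rank the peer drafts with rationale.\"\n)\n\ncritique_requests = {}\nfor agent_id in inputs[\"models\"]:\n    # each model sees every peer draft except its own\n    peers = {a: d for a, d in drafts.items() if a != agent_id}\n    critique_requests[agent_id] = {\"peers\": peers, \"instructions\": CRITIQUE_POINTS}"
      },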
      {
        "title": "Step 3) Revision round",
        "body": "Send critique feedback back to each original model and request revision:\n\nKeep Plan -> Execute -> Review -> Improve\nInclude Changes from Critique\nEnd with Revised Answer"
      },
      {
        "title": "Step 4) Final synthesis round",
        "body": "Integrate revised answers into one user-facing output:\n\nBest final answer\nWhy the synthesis is stronger than individual drafts\nRemaining uncertainties\nOptional next actions"
      },
      {
        "title": "Scoring rubric (required in critique + synthesis)",
        "body": "Score each draft on a 1-5 scale:\n\naccuracy: factual correctness and internal consistency\ncoverage: completeness against user request and constraints\nevidence: quality of assumptions and support\nactionability: usefulness for concrete decision/action\n\nDefault weighted score:\n0.40 * accuracy + 0.25 * coverage + 0.20 * evidence + 0.15 * actionability\n\nUse this score to justify rankings and the final selected direction."
      },
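      {
        "title": "Worked scoring example (illustrative)",
        "body": "A quick sanity check of the default weighting, using made-up sample ratings for a single draft:\n\nweights = {\"accuracy\": 0.40, \"coverage\": 0.25, \"evidence\": 0.20, \"actionability\": 0.15}\nscores = {\"accuracy\": 4, \"coverage\": 5, \"evidence\": 3, \"actionability\": 4}  # sample 1-5 ratings\nweighted = sum(weights[k] * scores[k] for k in weights)\nprint(round(weighted, 2))  # 4.05\n\nBecause the weights sum to 1.0, the weighted score stays on the same 1-5 scale as the individual criteria, which keeps per-draft rankings directly comparable."
      },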
      {
        "title": "Prompting resources",
        "body": "Use references/prompt-templates.md for canonical prompts.\nUse scripts/build_round_prompts.py when you need file-based prompt generation for repeated or batched runs.\nUse scripts/run_orchestration.py to generate a deterministic run-plan artifact for reproducible execution.\nUse references/orchestration-template.md for concrete OpenClaw tool-call flow."
      },
      {
        "title": "Required user-facing output shape",
        "body": "Final Answer\nKey Improvements from Critique\nUncertainties\nNext Steps (optional)\n\nWhen machine consumption is needed, return JSON matching references/output-schema.md.\n\nDo not expose private chain-of-thought. Provide concise reasoning summaries only."
      },
      {
        "title": "Failure handling",
        "body": "One model fails: continue with remaining models and note reduced diversity.\nTwo or more models fail: ask whether to retry or switch to single-model mode.\nStrong disagreement remains: present competing hypotheses and state what evidence would resolve them."
      },
      {
        "title": "Runtime defaults (recommended)",
        "body": "timeoutSec: 180 per round per model\nmaxRetries: 1 per failed model turn\nmaxRounds: fixed at 4 (draft, critique, revision, synthesis)\nbudgetUsd: optional hard stop when cost-sensitive"
      }
    ]
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/prairiedoggg/multi-model-critique",
    "publisherUrl": "https://clawhub.ai/prairiedoggg/multi-model-critique",
    "owner": "prairiedoggg",
    "version": "1.0.1",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/multi-model-critique",
    "downloadUrl": "https://openagent3.xyz/downloads/multi-model-critique",
    "agentUrl": "https://openagent3.xyz/skills/multi-model-critique/agent",
    "manifestUrl": "https://openagent3.xyz/skills/multi-model-critique/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/multi-model-critique/agent.md"
  }
}