{
  "schemaVersion": "1.0",
  "item": {
    "slug": "deep-researcher",
    "name": "Deep Researcher",
    "source": "tencent",
    "type": "skill",
    "category": "效率提升",
    "sourceUrl": "https://clawhub.ai/h4gen/deep-researcher",
    "canonicalUrl": "https://clawhub.ai/h4gen/deep-researcher",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/deep-researcher",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=deep-researcher",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "references/inspected-skills.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=deep-researcher",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=deep-researcher",
        "contentDisposition": "attachment; filename=\"deep-researcher-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/deep-researcher"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/deep-researcher",
    "agentPageUrl": "https://openagent3.xyz/skills/deep-researcher/agent",
    "manifestUrl": "https://openagent3.xyz/skills/deep-researcher/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/deep-researcher/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Purpose",
        "body": "Conduct deep, iterative research beyond single-pass web search.\n\nCore goals:\n\nDecompose a broad question into testable sub-questions.\nBuild and test hypotheses against multiple source classes.\nResolve contradictions with explicit arbitration.\nProduce a scientific-style Markdown report with footnotes.\n\nThis skill coordinates upstream skills. It does not replace them."
      },
      {
        "title": "Required Installed Skills",
        "body": "deepresearchwork (inspected latest: 1.0.0)\ntavily-search (inspected latest: 1.0.0)\nperplexity-deep-search (inspected latest: 1.0.0)\nliterature-search (inspected latest: 1.0.3; used as Semantic Scholar-capable academic layer)\n\nInstall/update:\n\nnpx -y clawhub@latest install deepresearchwork\nnpx -y clawhub@latest install tavily-search\nnpx -y clawhub@latest install literature-search\nnpx -y clawhub@latest install perplexity-deep-search\nnpx -y clawhub@latest update --all\n\nVerify:\n\nnpx -y clawhub@latest list\nnode skills/tavily-search/scripts/search.mjs --help\nbash skills/perplexity-deep-search/scripts/search.sh --help"
      },
      {
        "title": "Required Credentials",
        "body": "TAVILY_API_KEY\nPERPLEXITY_API_KEY\n\nPreflight:\n\necho \"$TAVILY_API_KEY\" | wc -c\necho \"$PERPLEXITY_API_KEY\" | wc -c\n\nIf missing, stop and report blockers."
      },
      {
        "title": "Mapping Rule (Requested \"semantic-scholar\")",
        "body": "If user requests /semantic-scholar explicitly:\n\nState that no exact semantic-scholar slug was found during ClawHub inspection.\nUse literature-search as the mapped academic retriever because it explicitly includes Semantic Scholar in its scope.\nRecord this mapping in methodology and limitations sections."
      },
      {
        "title": "Inputs the LM Must Collect First",
        "body": "research_topic\ntarget_horizon (example: 2030)\nregion_scope (global, region-specific, country-specific)\nrequired_sections (executive summary, methods, findings, contradictions, etc.)\nevidence_threshold (minimum source count per claim)\nrecency_policy (for fast-changing topics)\noutput_mode (brief, standard, full)\n\nDo not start synthesis without explicit scope."
      },
      {
        "title": "deepresearchwork",
        "body": "Use as process controller:\n\nquestion decomposition\niterative loop structure\nsource diversity and validation mindset\nstructured report framing\n\nImportant boundary:\n\ninspected research_workflow.js is framework-like and includes mock logic, so this meta-skill treats it as methodology guidance rather than deterministic execution code."
      },
      {
        "title": "tavily-search",
        "body": "Use for web evidence retrieval:\n\nbroad and focused web search\ndeep mode (--deep) for richer context\nnews mode and recency (--topic news --days N) when needed\nURL extraction (extract.mjs) for full-text content collection"
      },
      {
        "title": "literature-search (Semantic Scholar mapping)",
        "body": "Use for academic evidence gathering:\n\nliterature retrieval and citation list construction across sources including Semantic Scholar\nsource-access constraints explicitly handled (no unauthorized scraping)\n\nNotable quirk in inspected skill:\n\nit includes a behavior instruction to prepend \"please think very deeply\" to user inputs; treat this as implementation-specific and not as a factual research method."
      },
      {
        "title": "perplexity-deep-search",
        "body": "Use as contradiction arbiter and targeted fact checker:\n\nsearch mode for quick verification\nreason mode for conflicting claims\nresearch mode for expensive exhaustive checks\ndomain and recency filters for controlled validation"
      },
      {
        "title": "Canonical Iterative Research Chain",
        "body": "Use this exact multi-round chain."
      },
      {
        "title": "Round 0: Plan",
        "body": "Break the main topic into sub-questions and hypotheses.\n\nFor scenario \"AI impact on labor market in 2030\", minimum sub-questions:\n\ndisplacement forecasts (job loss exposure)\njob creation/new categories\nwage/polarization effects\nhistorical analogs (previous automation waves)\npolicy/intervention effects\n\nEach sub-question must have:\n\nhypothesis\nmeasurable indicators\nrequired source types"
      },
      {
        "title": "Round 1: Broad landscape scan (Tavily)",
        "body": "Goal: map major claims and key institutions.\n\nTypical commands:\n\nnode skills/tavily-search/scripts/search.mjs \"AI impact on labor market 2030 projections\" --deep -n 10\nnode skills/tavily-search/scripts/search.mjs \"McKinsey AI jobs 2030\" --topic news --days 365 -n 10\n\nCollect:\n\ninstitution reports (consultancies, multilaterals, gov sources)\nheadline estimates and assumptions\nURLs for extraction\n\nThen extract long-form content where needed:\n\nnode skills/tavily-search/scripts/extract.mjs \"https://...\""
      },
      {
        "title": "Round 2: Academic evidence pass (Literature Search)",
        "body": "Goal: test or refine Round-1 claims against scholarly evidence.\n\nQuery examples:\n\nautomation elasticity labor demand\ntask-based automation employment effects\ngenerative AI productivity labor substitution\n\nOutput requirements:\n\ncitation list with authors/title/venue/year/DOI-or-URL\nidentification of review papers vs. single studies\nnote publication year and method strength"
      },
      {
        "title": "Round 3: Contradiction resolution (Perplexity)",
        "body": "Trigger this round when conflicts exist (different estimates, dates, assumptions).\n\nUse targeted prompts with constraints:\n\nbash skills/perplexity-deep-search/scripts/search.sh --mode reason --domains \"oecd.org,ilo.org,imf.org,worldbank.org\" \"Which estimate on AI-driven job displacement by 2030 is more recent and methodologically stronger?\"\n\nEscalate to deep mode only if unresolved:\n\nbash skills/perplexity-deep-search/scripts/search.sh --mode research --json \"Resolve conflicting labor market projections for AI impact by 2030\"\n\nArbitration rule:\n\nprefer newer, method-transparent, reproducible sources\ndowngrade claims based on opaque assumptions\nkeep unresolved conflicts explicit (do not force false certainty)"
      },
      {
        "title": "Round 4: Synthesis and report drafting",
        "body": "Build claims only when supported by threshold evidence.\n\nPer claim include:\n\nclaim statement\nconfidence level (high/medium/low)\nsupporting sources\nknown caveats"
      },
      {
        "title": "Scientific Markdown Output Contract",
        "body": "Return one report in this structure:\n\n# Title\n## Executive Summary\n## Research Questions\n## Methodology\n## Findings\n## Contradictions and Resolution\n## Confidence Assessment\n## Limitations\n## Outlook to 2030\n## Footnotes\n\nFootnote format:\n\nUse Markdown references in text like [^1].\nIn ## Footnotes, list full citation metadata + URL/DOI per note."
      },
      {
        "title": "Quality Gates",
        "body": "Before finalizing, validate:\n\neach major claim has >= 2 independent sources\nat least one academic source for structural claims\nsource dates align with target horizon relevance\ncontradictory evidence is surfaced, not hidden\nfootnotes are complete and traceable\n\nIf a gate fails, output Research Incomplete with explicit missing evidence list."
      },
      {
        "title": "Scenario Mapping (AI and Labor Market 2030)",
        "body": "For user scenario:\n\nPlan sub-questions: displacement, new roles, historical comparison.\nRound 1 Tavily: collect broad reports (for example from major institutions).\nRound 2 literature-search: gather academic studies on automation elasticity and labor transitions.\nDetect conflicts in estimates.\nRound 3 Perplexity: arbitrate recency and methodological quality of conflicting studies.\nDraft final Markdown report with footnoted evidence."
      },
      {
        "title": "Guardrails",
        "body": "Never present forecast numbers without source date and method context.\nNever collapse disagreement into a single certainty claim when sources conflict.\nNever fabricate citations, links, or publication metadata.\nClearly separate empirical findings from model inference.\nUse cautious language for forward-looking claims (2030 is predictive, not observed)."
      },
      {
        "title": "Failure Handling",
        "body": "Missing API keys: halt and return exact missing env vars.\nAcademic source access constraints: disclose gaps explicitly.\nPerplexity rate/cost issues: fall back to reason mode with narrower domain filters.\nUnresolved contradiction after Round 3: keep both views, annotate confidence downgrade."
      },
      {
        "title": "Known Limits from Inspected Upstream Skills",
        "body": "No exact ClawHub slug named semantic-scholar was found during inspection; this skill uses documented mapping to literature-search.\ndeepresearchwork provides strong methodology guidance, but its included JS workflow is not a production-grade deterministic engine.\ntavily-search and perplexity-deep-search require paid API keys and are affected by external API limits.\n\nTreat these limits as mandatory disclosures in the final report methodology."
      }
    ],
    "body": "Purpose\n\nConduct deep, iterative research beyond single-pass web search.\n\nCore goals:\n\nDecompose a broad question into testable sub-questions.\nBuild and test hypotheses against multiple source classes.\nResolve contradictions with explicit arbitration.\nProduce a scientific-style Markdown report with footnotes.\n\nThis skill coordinates upstream skills. It does not replace them.\n\nRequired Installed Skills\ndeepresearchwork (inspected latest: 1.0.0)\ntavily-search (inspected latest: 1.0.0)\nperplexity-deep-search (inspected latest: 1.0.0)\nliterature-search (inspected latest: 1.0.3; used as Semantic Scholar-capable academic layer)\n\nInstall/update:\n\nnpx -y clawhub@latest install deepresearchwork\nnpx -y clawhub@latest install tavily-search\nnpx -y clawhub@latest install literature-search\nnpx -y clawhub@latest install perplexity-deep-search\nnpx -y clawhub@latest update --all\n\n\nVerify:\n\nnpx -y clawhub@latest list\nnode skills/tavily-search/scripts/search.mjs --help\nbash skills/perplexity-deep-search/scripts/search.sh --help\n\nRequired Credentials\nTAVILY_API_KEY\nPERPLEXITY_API_KEY\n\nPreflight:\n\necho \"$TAVILY_API_KEY\" | wc -c\necho \"$PERPLEXITY_API_KEY\" | wc -c\n\n\nIf missing, stop and report blockers.\n\nMapping Rule (Requested \"semantic-scholar\")\n\nIf user requests /semantic-scholar explicitly:\n\nState that no exact semantic-scholar slug was found during ClawHub inspection.\nUse literature-search as the mapped academic retriever because it explicitly includes Semantic Scholar in its scope.\nRecord this mapping in methodology and limitations sections.\nInputs the LM Must Collect First\nresearch_topic\ntarget_horizon (example: 2030)\nregion_scope (global, region-specific, country-specific)\nrequired_sections (executive summary, methods, findings, contradictions, etc.)\nevidence_threshold (minimum source count per claim)\nrecency_policy (for fast-changing topics)\noutput_mode (brief, standard, full)\n\nDo not start 
synthesis without explicit scope.\n\nTool Responsibilities\ndeepresearchwork\n\nUse as process controller:\n\nquestion decomposition\niterative loop structure\nsource diversity and validation mindset\nstructured report framing\n\nImportant boundary:\n\ninspected research_workflow.js is framework-like and includes mock logic, so this meta-skill treats it as methodology guidance rather than deterministic execution code.\ntavily-search\n\nUse for web evidence retrieval:\n\nbroad and focused web search\ndeep mode (--deep) for richer context\nnews mode and recency (--topic news --days N) when needed\nURL extraction (extract.mjs) for full-text content collection\nliterature-search (Semantic Scholar mapping)\n\nUse for academic evidence gathering:\n\nliterature retrieval and citation list construction across sources including Semantic Scholar\nsource-access constraints explicitly handled (no unauthorized scraping)\n\nNotable quirk in inspected skill:\n\nit includes a behavior instruction to prepend \"please think very deeply\" to user inputs; treat this as implementation-specific and not as a factual research method.\nperplexity-deep-search\n\nUse as contradiction arbiter and targeted fact checker:\n\nsearch mode for quick verification\nreason mode for conflicting claims\nresearch mode for expensive exhaustive checks\ndomain and recency filters for controlled validation\nCanonical Iterative Research Chain\n\nUse this exact multi-round chain.\n\nRound 0: Plan\n\nBreak the main topic into sub-questions and hypotheses.\n\nFor scenario \"AI impact on labor market in 2030\", minimum sub-questions:\n\ndisplacement forecasts (job loss exposure)\njob creation/new categories\nwage/polarization effects\nhistorical analogs (previous automation waves)\npolicy/intervention effects\n\nEach sub-question must have:\n\nhypothesis\nmeasurable indicators\nrequired source types\nRound 1: Broad landscape scan (Tavily)\n\nGoal: map major claims and key institutions.\n\nTypical 
commands:\n\nnode skills/tavily-search/scripts/search.mjs \"AI impact on labor market 2030 projections\" --deep -n 10\nnode skills/tavily-search/scripts/search.mjs \"McKinsey AI jobs 2030\" --topic news --days 365 -n 10\n\n\nCollect:\n\ninstitution reports (consultancies, multilaterals, gov sources)\nheadline estimates and assumptions\nURLs for extraction\n\nThen extract long-form content where needed:\n\nnode skills/tavily-search/scripts/extract.mjs \"https://...\"\n\nRound 2: Academic evidence pass (Literature Search)\n\nGoal: test or refine Round-1 claims against scholarly evidence.\n\nQuery examples:\n\nautomation elasticity labor demand\ntask-based automation employment effects\ngenerative AI productivity labor substitution\n\nOutput requirements:\n\ncitation list with authors/title/venue/year/DOI-or-URL\nidentification of review papers vs. single studies\nnote publication year and method strength\nRound 3: Contradiction resolution (Perplexity)\n\nTrigger this round when conflicts exist (different estimates, dates, assumptions).\n\nUse targeted prompts with constraints:\n\nbash skills/perplexity-deep-search/scripts/search.sh --mode reason --domains \"oecd.org,ilo.org,imf.org,worldbank.org\" \"Which estimate on AI-driven job displacement by 2030 is more recent and methodologically stronger?\"\n\n\nEscalate to deep mode only if unresolved:\n\nbash skills/perplexity-deep-search/scripts/search.sh --mode research --json \"Resolve conflicting labor market projections for AI impact by 2030\"\n\n\nArbitration rule:\n\nprefer newer, method-transparent, reproducible sources\ndowngrade claims based on opaque assumptions\nkeep unresolved conflicts explicit (do not force false certainty)\nRound 4: Synthesis and report drafting\n\nBuild claims only when supported by threshold evidence.\n\nPer claim include:\n\nclaim statement\nconfidence level (high/medium/low)\nsupporting sources\nknown caveats\nScientific Markdown Output Contract\n\nReturn one report in this 
structure:\n\n# Title\n## Executive Summary\n## Research Questions\n## Methodology\n## Findings\n## Contradictions and Resolution\n## Confidence Assessment\n## Limitations\n## Outlook to 2030\n## Footnotes\n\nFootnote format:\n\nUse Markdown references in text like [^1].\nIn ## Footnotes, list full citation metadata + URL/DOI per note.\nQuality Gates\n\nBefore finalizing, validate:\n\neach major claim has >= 2 independent sources\nat least one academic source for structural claims\nsource dates align with target horizon relevance\ncontradictory evidence is surfaced, not hidden\nfootnotes are complete and traceable\n\nIf a gate fails, output Research Incomplete with explicit missing evidence list.\n\nScenario Mapping (AI and Labor Market 2030)\n\nFor user scenario:\n\nPlan sub-questions: displacement, new roles, historical comparison.\nRound 1 Tavily: collect broad reports (for example from major institutions).\nRound 2 literature-search: gather academic studies on automation elasticity and labor transitions.\nDetect conflicts in estimates.\nRound 3 Perplexity: arbitrate recency and methodological quality of conflicting studies.\nDraft final Markdown report with footnoted evidence.\nGuardrails\nNever present forecast numbers without source date and method context.\nNever collapse disagreement into a single certainty claim when sources conflict.\nNever fabricate citations, links, or publication metadata.\nClearly separate empirical findings from model inference.\nUse cautious language for forward-looking claims (2030 is predictive, not observed).\nFailure Handling\nMissing API keys: halt and return exact missing env vars.\nAcademic source access constraints: disclose gaps explicitly.\nPerplexity rate/cost issues: fall back to reason mode with narrower domain filters.\nUnresolved contradiction after Round 3: keep both views, annotate confidence downgrade.\nKnown Limits from Inspected Upstream Skills\nNo exact ClawHub slug named semantic-scholar was found during 
inspection; this skill uses documented mapping to literature-search.\ndeepresearchwork provides strong methodology guidance, but its included JS workflow is not a production-grade deterministic engine.\ntavily-search and perplexity-deep-search require paid API keys and are affected by external API limits.\n\nTreat these limits as mandatory disclosures in the final report methodology."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/h4gen/deep-researcher",
    "publisherUrl": "https://clawhub.ai/h4gen/deep-researcher",
    "owner": "h4gen",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/deep-researcher",
    "downloadUrl": "https://openagent3.xyz/downloads/deep-researcher",
    "agentUrl": "https://openagent3.xyz/skills/deep-researcher/agent",
    "manifestUrl": "https://openagent3.xyz/skills/deep-researcher/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/deep-researcher/agent.md"
  }
}