{
  "schemaVersion": "1.0",
  "item": {
    "slug": "agent-deep-research",
    "name": "Deep Research (Gemini)",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/24601/agent-deep-research",
    "canonicalUrl": "https://clawhub.ai/24601/agent-deep-research",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/agent-deep-research",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=agent-deep-research",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "AGENTS.md",
      "CHANGELOG.md",
      "CLAUDE.md",
      "CODE_OF_CONDUCT.md",
      "CONTRIBUTING.md",
      "CREDITS.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "agent-deep-research",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-02T09:39:56.560Z",
      "expiresAt": "2026-05-09T09:39:56.560Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=agent-deep-research",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=agent-deep-research",
        "contentDisposition": "attachment; filename=\"agent-deep-research-2.1.3.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "agent-deep-research"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/agent-deep-research"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/agent-deep-research",
    "agentPageUrl": "https://openagent3.xyz/skills/agent-deep-research/agent",
    "manifestUrl": "https://openagent3.xyz/skills/agent-deep-research/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/agent-deep-research/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Deep Research Skill",
        "body": "Perform deep research powered by Google Gemini's deep research agent. Upload documents to file search stores for RAG-grounded answers. Manage research sessions with persistent workspace state."
      },
      {
        "title": "For AI Agents",
        "body": "Get a full capabilities manifest, decision trees, and output contracts:\n\nuv run {baseDir}/scripts/onboard.py --agent\n\nSee AGENTS.md for the complete structured briefing.\n\nCommand\tWhat It Does\nuv run {baseDir}/scripts/research.py start \"question\"\tLaunch deep research\nuv run {baseDir}/scripts/research.py start \"question\" --context ./path --dry-run\tEstimate cost\nuv run {baseDir}/scripts/research.py start \"question\" --context ./path --output report.md\tRAG-grounded research\nuv run {baseDir}/scripts/store.py query <name> \"question\"\tQuick Q&A against uploaded docs"
      },
      {
        "title": "Security & Transparency",
        "body": "Credentials: This skill requires a Google/Gemini API key (one of GOOGLE_API_KEY, GEMINI_API_KEY, or GEMINI_DEEP_RESEARCH_API_KEY). The key is read from environment variables and passed to the google-genai SDK. It is never logged, written to files, or transmitted anywhere other than the Google Gemini API.\n\nFile uploads: The --context flag uploads local files to Google's ephemeral file search stores for RAG grounding. Sensitive files are automatically excluded: .env*, credentials.json, secrets.*, private keys (.pem, .key), and auth tokens (.npmrc, .pypirc, .netrc). Binary files are rejected by MIME type filtering. Build directories (node_modules, __pycache__, .git, dist, build) are skipped. The ephemeral store is auto-deleted after research completes unless --keep-context is specified. Use --dry-run to preview what would be uploaded without sending anything. Only files you explicitly point --context at are uploaded -- no automatic scanning of parent directories or home folders.\n\nNon-interactive mode: When stdin is not a TTY (agent/CI use), confirmation prompts are automatically skipped. This is by design for agent integration but means an autonomous agent with file system access could trigger uploads. Restrict the paths agents can access, or use --dry-run and --max-cost guards.\n\nNo obfuscation: All code is readable Python with PEP 723 inline metadata. No binary blobs, no minified scripts, no telemetry, no analytics. The full source is auditable at github.com/24601/agent-deep-research.\n\nLocal state: Research session state is written to .gemini-research.json in the working directory. This file contains interaction IDs, store mappings, and upload hashes -- no credentials or research content. Use state.py gc to clean up orphaned stores from crashed runs."
      },
      {
        "title": "Prerequisites",
        "body": "A Google API key (GOOGLE_API_KEY or GEMINI_API_KEY environment variable)\nuv installed (see uv install docs)"
      },
      {
        "title": "Quick Start",
        "body": "# Run a deep research query\nuv run {baseDir}/scripts/research.py \"What are the latest advances in quantum computing?\"\n\n# Check research status\nuv run {baseDir}/scripts/research.py status <interaction-id>\n\n# Save a completed report\nuv run {baseDir}/scripts/research.py report <interaction-id> --output report.md\n\n# Research grounded in local files (auto-creates store, uploads, cleans up)\nuv run {baseDir}/scripts/research.py start \"How does auth work?\" --context ./src --output report.md\n\n# Export as HTML or PDF\nuv run {baseDir}/scripts/research.py start \"Analyze the API\" --context ./src --format html --output report.html\n\n# Auto-detect prompt template based on context files\nuv run {baseDir}/scripts/research.py start \"How does auth work?\" --context ./src --prompt-template auto --output report.md"
      },
      {
        "title": "Environment Variables",
        "body": "Set one of the following (checked in order of priority):\n\nVariable\tDescription\nGEMINI_DEEP_RESEARCH_API_KEY\tDedicated key for this skill (highest priority)\nGOOGLE_API_KEY\tStandard Google AI key\nGEMINI_API_KEY\tGemini-specific key\n\nOptional model configuration:\n\nVariable\tDescription\tDefault\nGEMINI_DEEP_RESEARCH_MODEL\tModel for file search queries\tgemini-3.1-pro-preview\nGEMINI_MODEL\tFallback model name\tgemini-3.1-pro-preview\nGEMINI_DEEP_RESEARCH_AGENT\tDeep research agent identifier\tdeep-research-pro-preview-12-2025"
      },
      {
        "title": "Start Research",
        "body": "uv run {baseDir}/scripts/research.py start \"your research question\"\n\nFlag\tDescription\n--report-format FORMAT\tOutput structure: executive_summary, detailed_report, comprehensive\n--store STORE_NAME\tGround research in a file search store (display name or resource ID)\n--no-thoughts\tHide intermediate thinking steps\n--follow-up ID\tContinue a previous research session\n--output FILE\tWait for completion and save report to a single file\n--output-dir DIR\tWait for completion and save structured results to a directory (see below)\n--timeout SECONDS\tMaximum wait time when polling (default: 1800 = 30 minutes)\n--no-adaptive-poll\tDisable history-adaptive polling; use fixed interval curve instead\n--context PATH\tAuto-create ephemeral store from a file or directory for RAG-grounded research\n--context-extensions EXT\tFilter context uploads by extension (e.g. py,md or .py .md)\n--keep-context\tKeep the ephemeral context store after research completes (default: auto-delete)\n--dry-run\tEstimate costs without starting research (prints JSON cost estimate)\n--format {md,html,pdf}\tOutput format for the report (default: md; pdf requires weasyprint)\n--prompt-template {typescript,python,general,auto}\tDomain-specific prompt prefix; auto detects from context file extensions\n--depth {quick,standard,deep}\tResearch depth: quick (~2-5min), standard (~5-15min), deep (~15-45min)\n--max-cost USD\tAbort if estimated cost exceeds this limit (e.g. --max-cost 3.00)\n--input-file PATH\tRead the research query from a file instead of positional argument\n--no-cache\tSkip research cache and force a fresh run\n\nThe start subcommand is the default, so research.py \"question\" and research.py start \"question\" are equivalent.\n\nImportant: When --output or --output-dir is used, the command blocks until research completes (2-10+ minutes). Do not background it with &. Use non-blocking mode (omit --output) to get an ID immediately, then poll with status and save with report."
      },
      {
        "title": "Check Status",
        "body": "uv run {baseDir}/scripts/research.py status <interaction-id>\n\nReturns the current status (in_progress, completed, failed) and outputs if available."
      },
      {
        "title": "Save Report",
        "body": "uv run {baseDir}/scripts/research.py report <interaction-id>\n\nFlag\tDescription\n--output FILE\tSave report to a specific file path (default: report-<id>.md)\n--output-dir DIR\tSave structured results to a directory"
      },
      {
        "title": "Structured Output (--output-dir)",
        "body": "When --output-dir is used, results are saved to a structured directory:\n\n<output-dir>/\n  research-<id>/\n    report.md          # Full final report\n    metadata.json      # Timing, status, output count, sizes\n    interaction.json   # Full interaction data (all outputs, thinking steps)\n    sources.json       # Extracted source URLs/citations\n\nA compact JSON summary (under 500 chars) is printed to stdout:\n\n{\n  \"id\": \"interaction-123\",\n  \"status\": \"completed\",\n  \"output_dir\": \"research-output/research-interaction-1/\",\n  \"report_file\": \"research-output/research-interaction-1/report.md\",\n  \"report_size_bytes\": 45000,\n  \"duration_seconds\": 154,\n  \"summary\": \"First 200 chars of the report...\"\n}\n\nThis is the recommended pattern for AI agent integration -- the agent receives a small JSON payload while the full report is written to disk."
      },
      {
        "title": "Adaptive Polling",
        "body": "When --output or --output-dir is used, the script polls the Gemini API until research completes. By default, it uses history-adaptive polling that learns from past research completion times:\n\nCompletion times are recorded in .gemini-research.json under researchHistory (last 50 entries, separate curves for grounded vs non-grounded research).\nWhen 3+ matching data points exist, the poll interval is tuned to the historical distribution:\n\nBefore any research has ever completed: slow polling (30s)\nIn the likely completion window (p25-p75): aggressive polling (5s)\nIn the tail (past p75): moderate polling (15-30s)\nUnusually long runs (past 1.5x the longest ever): slow polling (60s)\n\n\nAll intervals are clamped to [2s, 120s] as a fail-safe.\n\nWhen history is insufficient (<3 data points) or --no-adaptive-poll is passed, a fixed escalating curve is used: 5s (first 30s), 10s (30s-2min), 30s (2-10min), 60s (10min+)."
      },
      {
        "title": "Cost Estimation (--dry-run)",
        "body": "Preview estimated costs before running research:\n\nuv run {baseDir}/scripts/research.py start \"Analyze security architecture\" --context ./src --dry-run\n\nOutputs a JSON cost estimate to stdout with context upload costs, research query costs, and a total. Estimates are heuristic-based (the Gemini API does not return token counts or billing data) and clearly labeled as such.\n\nAfter research completes with --output-dir, the metadata.json file includes a usage key with post-run cost estimates based on actual output size and duration."
      },
      {
        "title": "File Search Store Commands",
        "body": "Manage file search stores for RAG-grounded research and Q&A."
      },
      {
        "title": "Create a Store",
        "body": "uv run {baseDir}/scripts/store.py create \"My Project Docs\""
      },
      {
        "title": "List Stores",
        "body": "uv run {baseDir}/scripts/store.py list"
      },
      {
        "title": "Query a Store",
        "body": "uv run {baseDir}/scripts/store.py query <store-name> \"What does the auth module do?\"\n\nFlag\tDescription\n--output-dir DIR\tSave response and metadata to a directory"
      },
      {
        "title": "Delete a Store",
        "body": "uv run {baseDir}/scripts/store.py delete <store-name>\n\nUse --force to skip the confirmation prompt. When stdin is not a TTY (e.g., called by an AI agent), the prompt is automatically skipped."
      },
      {
        "title": "File Upload",
        "body": "Upload files or entire directories to a file search store.\n\nuv run {baseDir}/scripts/upload.py ./src fileSearchStores/abc123\n\nFlag\tDescription\n--smart-sync\tSkip files that haven't changed (hash comparison)\n--extensions EXT [EXT ...]\tFile extensions to include (comma or space separated, e.g. py,ts,md or .py .ts .md)\n\nHash caches are always saved on successful upload, so a subsequent --smart-sync run will correctly skip unchanged files even if the first upload did not use --smart-sync."
      },
      {
        "title": "MIME Type Support",
        "body": "36 file extensions are natively supported by the Gemini File Search API. Common programming files (JS, TS, JSON, CSS, YAML, etc.) are automatically uploaded as text/plain via a fallback mechanism. Binary files are rejected. See references/file_search_guide.md for the full list.\n\nFile size limit: 100 MB per file."
      },
      {
        "title": "Session Management",
        "body": "Research IDs and store mappings are cached in .gemini-research.json in the current working directory."
      },
      {
        "title": "Show Session State",
        "body": "uv run {baseDir}/scripts/state.py show"
      },
      {
        "title": "Show Research Sessions Only",
        "body": "uv run {baseDir}/scripts/state.py research"
      },
      {
        "title": "Show Stores Only",
        "body": "uv run {baseDir}/scripts/state.py stores"
      },
      {
        "title": "JSON Output for Agents",
        "body": "Add --json to any state subcommand to output structured JSON to stdout:\n\nuv run {baseDir}/scripts/state.py --json show\nuv run {baseDir}/scripts/state.py --json research\nuv run {baseDir}/scripts/state.py --json stores"
      },
      {
        "title": "Clear Session State",
        "body": "uv run {baseDir}/scripts/state.py clear\n\nUse -y to skip the confirmation prompt. When stdin is not a TTY (e.g., called by an AI agent), the prompt is automatically skipped."
      },
      {
        "title": "Non-Interactive Mode",
        "body": "All confirmation prompts (store.py delete, state.py clear) are automatically skipped when stdin is not a TTY. This allows AI agents and CI pipelines to call these commands without hanging on interactive prompts."
      },
      {
        "title": "Workflow Example",
        "body": "A typical grounded research workflow:\n\n# 1. Create a file search store\nSTORE_JSON=$(uv run {baseDir}/scripts/store.py create \"Project Codebase\")\nSTORE_NAME=$(echo \"$STORE_JSON\" | python3 -c \"import sys,json; print(json.load(sys.stdin)['name'])\")\n\n# 2. Upload your documents\nuv run {baseDir}/scripts/upload.py ./docs \"$STORE_NAME\" --smart-sync\n\n# 3. Query the store directly\nuv run {baseDir}/scripts/store.py query \"$STORE_NAME\" \"How is authentication handled?\"\n\n# 4. Start grounded deep research (blocking, saves to directory)\nuv run {baseDir}/scripts/research.py start \"Analyze the security architecture\" \\\n  --store \"$STORE_NAME\" --output-dir ./research-output --timeout 3600\n\n# 5. Or start non-blocking and check later\nRESEARCH_JSON=$(uv run {baseDir}/scripts/research.py start \"Analyze the security architecture\" --store \"$STORE_NAME\")\nRESEARCH_ID=$(echo \"$RESEARCH_JSON\" | python3 -c \"import sys,json; print(json.load(sys.stdin)['id'])\")\n\n# 6. Check progress\nuv run {baseDir}/scripts/research.py status \"$RESEARCH_ID\"\n\n# 7. Save the report when completed\nuv run {baseDir}/scripts/research.py report \"$RESEARCH_ID\" --output-dir ./research-output"
      },
      {
        "title": "Output Convention",
        "body": "All scripts follow a dual-output pattern:\n\nstderr: Rich-formatted human-readable output (tables, panels, progress bars)\nstdout: Machine-readable JSON for programmatic consumption\n\nThis means 2>/dev/null hides the human output, and piping stdout gives clean JSON."
      }
    ],
    "body": "Deep Research Skill\n\nPerform deep research powered by Google Gemini's deep research agent. Upload documents to file search stores for RAG-grounded answers. Manage research sessions with persistent workspace state.\n\nFor AI Agents\n\nGet a full capabilities manifest, decision trees, and output contracts:\n\nuv run {baseDir}/scripts/onboard.py --agent\n\n\nSee AGENTS.md for the complete structured briefing.\n\nCommand\tWhat It Does\nuv run {baseDir}/scripts/research.py start \"question\"\tLaunch deep research\nuv run {baseDir}/scripts/research.py start \"question\" --context ./path --dry-run\tEstimate cost\nuv run {baseDir}/scripts/research.py start \"question\" --context ./path --output report.md\tRAG-grounded research\nuv run {baseDir}/scripts/store.py query <name> \"question\"\tQuick Q&A against uploaded docs\nSecurity & Transparency\n\nCredentials: This skill requires a Google/Gemini API key (one of GOOGLE_API_KEY, GEMINI_API_KEY, or GEMINI_DEEP_RESEARCH_API_KEY). The key is read from environment variables and passed to the google-genai SDK. It is never logged, written to files, or transmitted anywhere other than the Google Gemini API.\n\nFile uploads: The --context flag uploads local files to Google's ephemeral file search stores for RAG grounding. Sensitive files are automatically excluded: .env*, credentials.json, secrets.*, private keys (.pem, .key), and auth tokens (.npmrc, .pypirc, .netrc). Binary files are rejected by MIME type filtering. Build directories (node_modules, __pycache__, .git, dist, build) are skipped. The ephemeral store is auto-deleted after research completes unless --keep-context is specified. Use --dry-run to preview what would be uploaded without sending anything. Only files you explicitly point --context at are uploaded -- no automatic scanning of parent directories or home folders.\n\nNon-interactive mode: When stdin is not a TTY (agent/CI use), confirmation prompts are automatically skipped. This is by design for agent integration but means an autonomous agent with file system access could trigger uploads. Restrict the paths agents can access, or use --dry-run and --max-cost guards.\n\nNo obfuscation: All code is readable Python with PEP 723 inline metadata. No binary blobs, no minified scripts, no telemetry, no analytics. The full source is auditable at github.com/24601/agent-deep-research.\n\nLocal state: Research session state is written to .gemini-research.json in the working directory. This file contains interaction IDs, store mappings, and upload hashes -- no credentials or research content. Use state.py gc to clean up orphaned stores from crashed runs.\n\nPrerequisites\nA Google API key (GOOGLE_API_KEY or GEMINI_API_KEY environment variable)\nuv installed (see uv install docs)\nQuick Start\n# Run a deep research query\nuv run {baseDir}/scripts/research.py \"What are the latest advances in quantum computing?\"\n\n# Check research status\nuv run {baseDir}/scripts/research.py status <interaction-id>\n\n# Save a completed report\nuv run {baseDir}/scripts/research.py report <interaction-id> --output report.md\n\n# Research grounded in local files (auto-creates store, uploads, cleans up)\nuv run {baseDir}/scripts/research.py start \"How does auth work?\" --context ./src --output report.md\n\n# Export as HTML or PDF\nuv run {baseDir}/scripts/research.py start \"Analyze the API\" --context ./src --format html --output report.html\n\n# Auto-detect prompt template based on context files\nuv run {baseDir}/scripts/research.py start \"How does auth work?\" --context ./src --prompt-template auto --output report.md\n\nEnvironment Variables\n\nSet one of the following (checked in order of priority):\n\nVariable\tDescription\nGEMINI_DEEP_RESEARCH_API_KEY\tDedicated key for this skill (highest priority)\nGOOGLE_API_KEY\tStandard Google AI key\nGEMINI_API_KEY\tGemini-specific key\n\nOptional model configuration:\n\nVariable\tDescription\tDefault\nGEMINI_DEEP_RESEARCH_MODEL\tModel for file search queries\tgemini-3.1-pro-preview\nGEMINI_MODEL\tFallback model name\tgemini-3.1-pro-preview\nGEMINI_DEEP_RESEARCH_AGENT\tDeep research agent identifier\tdeep-research-pro-preview-12-2025\nResearch Commands\nStart Research\nuv run {baseDir}/scripts/research.py start \"your research question\"\n\nFlag\tDescription\n--report-format FORMAT\tOutput structure: executive_summary, detailed_report, comprehensive\n--store STORE_NAME\tGround research in a file search store (display name or resource ID)\n--no-thoughts\tHide intermediate thinking steps\n--follow-up ID\tContinue a previous research session\n--output FILE\tWait for completion and save report to a single file\n--output-dir DIR\tWait for completion and save structured results to a directory (see below)\n--timeout SECONDS\tMaximum wait time when polling (default: 1800 = 30 minutes)\n--no-adaptive-poll\tDisable history-adaptive polling; use fixed interval curve instead\n--context PATH\tAuto-create ephemeral store from a file or directory for RAG-grounded research\n--context-extensions EXT\tFilter context uploads by extension (e.g. py,md or .py .md)\n--keep-context\tKeep the ephemeral context store after research completes (default: auto-delete)\n--dry-run\tEstimate costs without starting research (prints JSON cost estimate)\n--format {md,html,pdf}\tOutput format for the report (default: md; pdf requires weasyprint)\n--prompt-template {typescript,python,general,auto}\tDomain-specific prompt prefix; auto detects from context file extensions\n--depth {quick,standard,deep}\tResearch depth: quick (~2-5min), standard (~5-15min), deep (~15-45min)\n--max-cost USD\tAbort if estimated cost exceeds this limit (e.g. --max-cost 3.00)\n--input-file PATH\tRead the research query from a file instead of positional argument\n--no-cache\tSkip research cache and force a fresh run\n\nThe start subcommand is the default, so research.py \"question\" and research.py start \"question\" are equivalent.\n\nImportant: When --output or --output-dir is used, the command blocks until research completes (2-10+ minutes). Do not background it with &. Use non-blocking mode (omit --output) to get an ID immediately, then poll with status and save with report.\n\nCheck Status\nuv run {baseDir}/scripts/research.py status <interaction-id>\n\n\nReturns the current status (in_progress, completed, failed) and outputs if available.\n\nSave Report\nuv run {baseDir}/scripts/research.py report <interaction-id>\n\nFlag\tDescription\n--output FILE\tSave report to a specific file path (default: report-<id>.md)\n--output-dir DIR\tSave structured results to a directory\nStructured Output (--output-dir)\n\nWhen --output-dir is used, results are saved to a structured directory:\n\n<output-dir>/\n  research-<id>/\n    report.md          # Full final report\n    metadata.json      # Timing, status, output count, sizes\n    interaction.json   # Full interaction data (all outputs, thinking steps)\n    sources.json       # Extracted source URLs/citations\n\n\nA compact JSON summary (under 500 chars) is printed to stdout:\n\n{\n  \"id\": \"interaction-123\",\n  \"status\": \"completed\",\n  \"output_dir\": \"research-output/research-interaction-1/\",\n  \"report_file\": \"research-output/research-interaction-1/report.md\",\n  \"report_size_bytes\": 45000,\n  \"duration_seconds\": 154,\n  \"summary\": \"First 200 chars of the report...\"\n}\n\n\nThis is the recommended pattern for AI agent integration -- the agent receives a small JSON payload while the full report is written to disk.\n\nAdaptive Polling\n\nWhen --output or --output-dir is used, the script polls the Gemini API until research completes. By default, it uses history-adaptive polling that learns from past research completion times:\n\nCompletion times are recorded in .gemini-research.json under researchHistory (last 50 entries, separate curves for grounded vs non-grounded research).\nWhen 3+ matching data points exist, the poll interval is tuned to the historical distribution:\nBefore any research has ever completed: slow polling (30s)\nIn the likely completion window (p25-p75): aggressive polling (5s)\nIn the tail (past p75): moderate polling (15-30s)\nUnusually long runs (past 1.5x the longest ever): slow polling (60s)\nAll intervals are clamped to [2s, 120s] as a fail-safe.\n\nWhen history is insufficient (<3 data points) or --no-adaptive-poll is passed, a fixed escalating curve is used: 5s (first 30s), 10s (30s-2min), 30s (2-10min), 60s (10min+).\n\nCost Estimation (--dry-run)\n\nPreview estimated costs before running research:\n\nuv run {baseDir}/scripts/research.py start \"Analyze security architecture\" --context ./src --dry-run\n\n\nOutputs a JSON cost estimate to stdout with context upload costs, research query costs, and a total. Estimates are heuristic-based (the Gemini API does not return token counts or billing data) and clearly labeled as such.\n\nAfter research completes with --output-dir, the metadata.json file includes a usage key with post-run cost estimates based on actual output size and duration.\n\nFile Search Store Commands\n\nManage file search stores for RAG-grounded research and Q&A.\n\nCreate a Store\nuv run {baseDir}/scripts/store.py create \"My Project Docs\"\n\nList Stores\nuv run {baseDir}/scripts/store.py list\n\nQuery a Store\nuv run {baseDir}/scripts/store.py query <store-name> \"What does the auth module do?\"\n\nFlag\tDescription\n--output-dir DIR\tSave response and metadata to a directory\nDelete a Store\nuv run {baseDir}/scripts/store.py delete <store-name>\n\n\nUse --force to skip the confirmation prompt. When stdin is not a TTY (e.g., called by an AI agent), the prompt is automatically skipped.\n\nFile Upload\n\nUpload files or entire directories to a file search store.\n\nuv run {baseDir}/scripts/upload.py ./src fileSearchStores/abc123\n\nFlag\tDescription\n--smart-sync\tSkip files that haven't changed (hash comparison)\n--extensions EXT [EXT ...]\tFile extensions to include (comma or space separated, e.g. py,ts,md or .py .ts .md)\n\nHash caches are always saved on successful upload, so a subsequent --smart-sync run will correctly skip unchanged files even if the first upload did not use --smart-sync.\n\nMIME Type Support\n\n36 file extensions are natively supported by the Gemini File Search API. Common programming files (JS, TS, JSON, CSS, YAML, etc.) are automatically uploaded as text/plain via a fallback mechanism. Binary files are rejected. See references/file_search_guide.md for the full list.\n\nFile size limit: 100 MB per file.\n\nSession Management\n\nResearch IDs and store mappings are cached in .gemini-research.json in the current working directory.\n\nShow Session State\nuv run {baseDir}/scripts/state.py show\n\nShow Research Sessions Only\nuv run {baseDir}/scripts/state.py research\n\nShow Stores Only\nuv run {baseDir}/scripts/state.py stores\n\nJSON Output for Agents\n\nAdd --json to any state subcommand to output structured JSON to stdout:\n\nuv run {baseDir}/scripts/state.py --json show\nuv run {baseDir}/scripts/state.py --json research\nuv run {baseDir}/scripts/state.py --json stores\n\nClear Session State\nuv run {baseDir}/scripts/state.py clear\n\n\nUse -y to skip the confirmation prompt. When stdin is not a TTY (e.g., called by an AI agent), the prompt is automatically skipped.\n\nNon-Interactive Mode\n\nAll confirmation prompts (store.py delete, state.py clear) are automatically skipped when stdin is not a TTY. This allows AI agents and CI pipelines to call these commands without hanging on interactive prompts.\n\nWorkflow Example\n\nA typical grounded research workflow:\n\n# 1. Create a file search store\nSTORE_JSON=$(uv run {baseDir}/scripts/store.py create \"Project Codebase\")\nSTORE_NAME=$(echo \"$STORE_JSON\" | python3 -c \"import sys,json; print(json.load(sys.stdin)['name'])\")\n\n# 2. Upload your documents\nuv run {baseDir}/scripts/upload.py ./docs \"$STORE_NAME\" --smart-sync\n\n# 3. Query the store directly\nuv run {baseDir}/scripts/store.py query \"$STORE_NAME\" \"How is authentication handled?\"\n\n# 4. Start grounded deep research (blocking, saves to directory)\nuv run {baseDir}/scripts/research.py start \"Analyze the security architecture\" \\\n  --store \"$STORE_NAME\" --output-dir ./research-output --timeout 3600\n\n# 5. Or start non-blocking and check later\nRESEARCH_JSON=$(uv run {baseDir}/scripts/research.py start \"Analyze the security architecture\" --store \"$STORE_NAME\")\nRESEARCH_ID=$(echo \"$RESEARCH_JSON\" | python3 -c \"import sys,json; print(json.load(sys.stdin)['id'])\")\n\n# 6. Check progress\nuv run {baseDir}/scripts/research.py status \"$RESEARCH_ID\"\n\n# 7. Save the report when completed\nuv run {baseDir}/scripts/research.py report \"$RESEARCH_ID\" --output-dir ./research-output\n\nOutput Convention\n\nAll scripts follow a dual-output pattern:\n\nstderr: Rich-formatted human-readable output (tables, panels, progress bars)\nstdout: Machine-readable JSON for programmatic consumption\n\nThis means 2>/dev/null hides the human output, and piping stdout gives clean JSON."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/24601/agent-deep-research",
    "publisherUrl": "https://clawhub.ai/24601/agent-deep-research",
    "owner": "24601",
    "version": "2.1.3",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/agent-deep-research",
    "downloadUrl": "https://openagent3.xyz/downloads/agent-deep-research",
    "agentUrl": "https://openagent3.xyz/skills/agent-deep-research/agent",
    "manifestUrl": "https://openagent3.xyz/skills/agent-deep-research/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/agent-deep-research/agent.md"
  }
}