{
  "schemaVersion": "1.0",
  "item": {
    "slug": "last30days",
    "name": "Last 30 Days",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/zats/last30days",
    "canonicalUrl": "https://clawhub.ai/zats/last30days",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/last30days",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=last30days",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "scripts/last30days.py",
      "scripts/lib/__init__.py",
      "scripts/lib/cache.py",
      "scripts/lib/dates.py",
      "scripts/lib/dedupe.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/last30days"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/last30days",
    "agentPageUrl": "https://openagent3.xyz/skills/last30days/agent",
    "manifestUrl": "https://openagent3.xyz/skills/last30days/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/last30days/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "last30days: Research Any Topic from the Last 30 Days",
        "body": "Research ANY topic across Reddit, X, and the web. Surface what people are actually discussing, recommending, and debating right now.\n\nUse cases:\n\nPrompting: \"photorealistic people in Nano Banana Pro\", \"Midjourney prompts\", \"ChatGPT image generation\" → learn techniques, get copy-paste prompts\nRecommendations: \"best Claude Code skills\", \"top AI tools\" → get a LIST of specific things people mention\nNews: \"what's happening with OpenAI\", \"latest AI announcements\" → current events and updates\nGeneral: any topic you're curious about → understand what the community is saying"
      },
      {
        "title": "CRITICAL: Parse User Intent",
        "body": "Before doing anything, parse the user's input for:\n\nTOPIC: What they want to learn about (e.g., \"web app mockups\", \"Claude Code skills\", \"image generation\")\nTARGET TOOL (if specified): Where they'll use the prompts (e.g., \"Nano Banana Pro\", \"ChatGPT\", \"Midjourney\")\nQUERY TYPE: What kind of research they want:\n\nPROMPTING - \"X prompts\", \"prompting for X\", \"X best practices\" → User wants to learn techniques and get copy-paste prompts\nRECOMMENDATIONS - \"best X\", \"top X\", \"what X should I use\", \"recommended X\" → User wants a LIST of specific things\nNEWS - \"what's happening with X\", \"X news\", \"latest on X\" → User wants current events/updates\nGENERAL - anything else → User wants broad understanding of the topic\n\nCommon patterns:\n\n[topic] for [tool] → \"web mockups for Nano Banana Pro\" → TOOL IS SPECIFIED\n[topic] prompts for [tool] → \"UI design prompts for Midjourney\" → TOOL IS SPECIFIED\nJust [topic] → \"iOS design mockups\" → TOOL NOT SPECIFIED, that's OK\n\"best [topic]\" or \"top [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\"what are the best [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\nIMPORTANT: Do NOT ask about target tool before research.\n\nIf tool is specified in the query, use it\nIf tool is NOT specified, run research first, then ask AFTER showing results\n\nStore these variables:\n\nTOPIC = [extracted topic]\nTARGET_TOOL = [extracted tool, or \"unknown\" if not specified]\nQUERY_TYPE = [RECOMMENDATIONS | NEWS | HOW-TO | GENERAL]"
      },
      {
        "title": "Setup Check",
        "body": "The skill works in three modes based on available API keys:\n\nFull Mode (both keys): Reddit + X + WebSearch - best results with engagement metrics\nPartial Mode (one key): Reddit-only or X-only + WebSearch\nWeb-Only Mode (no keys): WebSearch only - still useful, but no engagement metrics\n\nAPI keys are OPTIONAL. The skill will work without them using WebSearch fallback."
      },
      {
        "title": "First-Time Setup (Optional but Recommended)",
        "body": "If the user wants to add API keys for better results:\n\nmkdir -p ~/.config/last30days\ncat > ~/.config/last30days/.env << 'ENVEOF'\n# last30days API Configuration\n# Both keys are optional - skill works with WebSearch fallback\n\n# For Reddit research (uses OpenAI's web_search tool)\nOPENAI_API_KEY=\n\n# For X/Twitter research (uses xAI's x_search tool)\nXAI_API_KEY=\nENVEOF\n\nchmod 600 ~/.config/last30days/.env\necho \"Config created at ~/.config/last30days/.env\"\necho \"Edit to add your API keys for enhanced research.\"\n\nDO NOT stop if no keys are configured. Proceed with web-only mode."
      },
      {
        "title": "Research Execution",
        "body": "IMPORTANT: The script handles API key detection automatically. Run it and check the output to determine mode.\n\nStep 1: Run the research script\n\npython3 ./scripts/last30days.py \"$ARGUMENTS\" --emit=compact 2>&1\n\nThe script will automatically:\n\nDetect available API keys\nShow a promo banner if keys are missing (this is intentional marketing)\nRun Reddit/X searches if keys exist\nSignal if WebSearch is needed\n\nStep 2: Check the output mode\n\nThe script output will indicate the mode:\n\n\"Mode: both\" or \"Mode: reddit-only\" or \"Mode: x-only\": Script found results, WebSearch is supplementary\n\"Mode: web-only\": No API keys, Claude must do ALL research via WebSearch\n\nStep 3: Do WebSearch\n\nFor ALL modes, do WebSearch to supplement (or provide all data in web-only mode).\n\nChoose search queries based on QUERY_TYPE:\n\nIf RECOMMENDATIONS (\"best X\", \"top X\", \"what X should I use\"):\n\nSearch for: best {TOPIC} recommendations\nSearch for: {TOPIC} list examples\nSearch for: most popular {TOPIC}\nGoal: Find SPECIFIC NAMES of things, not generic advice\n\nIf NEWS (\"what's happening with X\", \"X news\"):\n\nSearch for: {TOPIC} news 2026\nSearch for: {TOPIC} announcement update\nGoal: Find current events and recent developments\n\nIf PROMPTING (\"X prompts\", \"prompting for X\"):\n\nSearch for: {TOPIC} prompts examples 2026\nSearch for: {TOPIC} techniques tips\nGoal: Find prompting techniques and examples to create copy-paste prompts\n\nIf GENERAL (default):\n\nSearch for: {TOPIC} 2026\nSearch for: {TOPIC} discussion\nGoal: Find what people are actually saying\n\nFor ALL query types:\n\nUSE THE USER'S EXACT TERMINOLOGY - don't substitute or add tech names based on your knowledge\n\nIf user says \"ChatGPT image prompting\", search for \"ChatGPT image prompting\"\nDo NOT add \"DALL-E\", \"GPT-4o\", or other terms you think are related\nYour knowledge may be outdated - trust the user's terminology\n\n\nEXCLUDE reddit.com, x.com, 
twitter.com (covered by script)\nINCLUDE: blogs, tutorials, docs, news, GitHub repos\nDO NOT output \"Sources:\" list - this is noise, we'll show stats at the end\n\nStep 4: Wait for background script to complete\nUse TaskOutput to get the script results before proceeding to synthesis.\n\nDepth options (passed through from user's command):\n\n--quick → Faster, fewer sources (8-12 each)\n(default) → Balanced (20-30 each)\n--deep → Comprehensive (50-70 Reddit, 40-60 X)"
      },
      {
        "title": "Judge Agent: Synthesize All Sources",
        "body": "After all searches complete, internally synthesize (don't display stats yet):\n\nThe Judge Agent must:\n\nWeight Reddit/X sources HIGHER (they have engagement signals: upvotes, likes)\nWeight WebSearch sources LOWER (no engagement data)\nIdentify patterns that appear across ALL three sources (strongest signals)\nNote any contradictions between sources\nExtract the top 3-5 actionable insights\n\nDo NOT display stats here - they come at the end, right before the invitation."
      },
      {
        "title": "FIRST: Internalize the Research",
        "body": "CRITICAL: Ground your synthesis in the ACTUAL research content, not your pre-existing knowledge.\n\nRead the research output carefully. Pay attention to:\n\nExact product/tool names mentioned (e.g., if research mentions \"ClawdBot\" or \"@clawdbot\", that's a DIFFERENT product than \"Claude Code\" - don't conflate them)\nSpecific quotes and insights from the sources - use THESE, not generic knowledge\nWhat the sources actually say, not what you assume the topic is about\n\nANTI-PATTERN TO AVOID: If user asks about \"clawdbot skills\" and research returns ClawdBot content (self-hosted AI agent), do NOT synthesize this as \"Claude Code skills\" just because both involve \"skills\". Read what the research actually says."
      },
      {
        "title": "If QUERY_TYPE = RECOMMENDATIONS",
        "body": "CRITICAL: Extract SPECIFIC NAMES, not generic patterns.\n\nWhen user asks \"best X\" or \"top X\", they want a LIST of specific things:\n\nScan research for specific product names, tool names, project names, skill names, etc.\nCount how many times each is mentioned\nNote which sources recommend each (Reddit thread, X post, blog)\nList them by popularity/mention count\n\nBAD synthesis for \"best Claude Code skills\":\n\n\"Skills are powerful. Keep them under 500 lines. Use progressive disclosure.\"\n\nGOOD synthesis for \"best Claude Code skills\":\n\n\"Most mentioned skills: /commit (5 mentions), remotion skill (4x), git-worktree (3x), /pr (3x). The Remotion announcement got 16K likes on X.\""
      },
      {
        "title": "For all QUERY_TYPEs",
        "body": "Identify from the ACTUAL RESEARCH OUTPUT:\n\nPROMPT FORMAT - Does research recommend JSON, structured params, natural language, keywords? THIS IS CRITICAL.\nThe top 3-5 patterns/techniques that appeared across multiple sources\nSpecific keywords, structures, or approaches mentioned BY THE SOURCES\nCommon pitfalls mentioned BY THE SOURCES\n\nIf research says \"use JSON prompts\" or \"structured prompts\", you MUST deliver prompts in that format later."
      },
      {
        "title": "THEN: Show Summary + Invite Vision",
        "body": "CRITICAL: Do NOT output any \"Sources:\" lists. The final display should be clean.\n\nDisplay in this EXACT sequence:\n\nFIRST - What I learned (based on QUERY_TYPE):\n\nIf RECOMMENDATIONS - Show specific things mentioned:\n\n🏆 Most mentioned:\n1. [Specific name] - mentioned {n}x (r/sub, @handle, blog.com)\n2. [Specific name] - mentioned {n}x (sources)\n3. [Specific name] - mentioned {n}x (sources)\n4. [Specific name] - mentioned {n}x (sources)\n5. [Specific name] - mentioned {n}x (sources)\n\nNotable mentions: [other specific things with 1-2 mentions]\n\nIf PROMPTING/NEWS/GENERAL - Show synthesis and patterns:\n\nWhat I learned:\n\n[2-4 sentences synthesizing key insights FROM THE ACTUAL RESEARCH OUTPUT.]\n\nKEY PATTERNS I'll use:\n1. [Pattern from research]\n2. [Pattern from research]\n3. [Pattern from research]\n\nTHEN - Stats (right before invitation):\n\nFor full/partial mode (has API keys):\n\n---\n✅ All agents reported back!\n├─ 🟠 Reddit: {n} threads │ {sum} upvotes │ {sum} comments\n├─ 🔵 X: {n} posts │ {sum} likes │ {sum} reposts\n├─ 🌐 Web: {n} pages │ {domains}\n└─ Top voices: r/{sub1}, r/{sub2} │ @{handle1}, @{handle2} │ {web_author} on {site}\n\nFor web-only mode (no API keys):\n\n---\n✅ Research complete!\n├─ 🌐 Web: {n} pages │ {domains}\n└─ Top sources: {author1} on {site1}, {author2} on {site2}\n\n💡 Want engagement metrics? Add API keys to ~/.config/last30days/.env\n   - OPENAI_API_KEY → Reddit (real upvotes & comments)\n   - XAI_API_KEY → X/Twitter (real likes & reposts)\n\nLAST - Invitation:\n\n---\nShare your vision for what you want to create and I'll write a thoughtful prompt you can copy-paste directly into {TARGET_TOOL}.\n\nUse real numbers from the research output. The patterns should be actual insights from the research, not generic advice.\n\nSELF-CHECK before displaying: Re-read your \"What I learned\" section. Does it match what the research ACTUALLY says? 
If the research was about ClawdBot (a self-hosted AI agent), your summary should be about ClawdBot, not Claude Code. If you catch yourself projecting your own knowledge instead of the research, rewrite it.\n\nIF TARGET_TOOL is still unknown after showing results, ask NOW (not before research):\n\nWhat tool will you use these prompts with?\n\nOptions:\n1. [Most relevant tool based on research - e.g., if research mentioned Figma/Sketch, offer those]\n2. Nano Banana Pro (image generation)\n3. ChatGPT / Claude (text/code)\n4. Other (tell me)\n\nIMPORTANT: After displaying this, WAIT for the user to respond. Don't dump generic prompts."
      },
      {
        "title": "WAIT FOR USER'S VISION",
        "body": "After showing the stats summary with your invitation, STOP and wait for the user to tell you what they want to create.\n\nWhen they respond with their vision (e.g., \"I want a landing page mockup for my SaaS app\"), THEN write a single, thoughtful, tailored prompt."
      },
      {
        "title": "WHEN USER SHARES THEIR VISION: Write ONE Perfect Prompt",
        "body": "Based on what they want to create, write a single, highly-tailored prompt using your research expertise."
      },
      {
        "title": "CRITICAL: Match the FORMAT the research recommends",
        "body": "If research says to use a specific prompt FORMAT, YOU MUST USE THAT FORMAT:\n\nResearch says \"JSON prompts\" → Write the prompt AS JSON\nResearch says \"structured parameters\" → Use structured key: value format\nResearch says \"natural language\" → Use conversational prose\nResearch says \"keyword lists\" → Use comma-separated keywords\n\nANTI-PATTERN: Research says \"use JSON prompts with device specs\" but you write plain prose. This defeats the entire purpose of the research."
      },
      {
        "title": "Output Format:",
        "body": "Here's your prompt for {TARGET_TOOL}:\n\n---\n\n[The actual prompt IN THE FORMAT THE RESEARCH RECOMMENDS - if research said JSON, this is JSON. If research said natural language, this is prose. Match what works.]\n\n---\n\nThis uses [brief 1-line explanation of what research insight you applied]."
      },
      {
        "title": "Quality Checklist:",
        "body": "FORMAT MATCHES RESEARCH - If research said JSON/structured/etc, prompt IS that format\n Directly addresses what the user said they want to create\n Uses specific patterns/keywords discovered in research\n Ready to paste with zero edits (or minimal [PLACEHOLDERS] clearly marked)\n Appropriate length and style for TARGET_TOOL"
      },
      {
        "title": "IF USER ASKS FOR MORE OPTIONS",
        "body": "Only if they ask for alternatives or more prompts, provide 2-3 variations. Don't dump a prompt pack unless requested."
      },
      {
        "title": "AFTER EACH PROMPT: Stay in Expert Mode",
        "body": "After delivering a prompt, offer to write more:\n\nWant another prompt? Just tell me what you're creating next."
      },
      {
        "title": "CONTEXT MEMORY",
        "body": "For the rest of this conversation, remember:\n\nTOPIC: {topic}\nTARGET_TOOL: {tool}\nKEY PATTERNS: {list the top 3-5 patterns you learned}\nRESEARCH FINDINGS: The key facts and insights from the research\n\nCRITICAL: After research is complete, you are now an EXPERT on this topic.\n\nWhen the user asks follow-up questions:\n\nDO NOT run new WebSearches - you already have the research\nAnswer from what you learned - cite the Reddit threads, X posts, and web sources\nIf they ask for a prompt - write one using your expertise\nIf they ask a question - answer it from your research findings\n\nOnly do new research if the user explicitly asks about a DIFFERENT topic."
      },
      {
        "title": "Output Summary Footer (After Each Prompt)",
        "body": "After delivering a prompt, end with:\n\nFor full/partial mode:\n\n---\n📚 Expert in: {TOPIC} for {TARGET_TOOL}\n📊 Based on: {n} Reddit threads ({sum} upvotes) + {n} X posts ({sum} likes) + {n} web pages\n\nWant another prompt? Just tell me what you're creating next.\n\nFor web-only mode:\n\n---\n📚 Expert in: {TOPIC} for {TARGET_TOOL}\n📊 Based on: {n} web pages from {domains}\n\nWant another prompt? Just tell me what you're creating next.\n\n💡 Unlock Reddit & X data: Add API keys to ~/.config/last30days/.env"
      }
    ],
    "body": "last30days: Research Any Topic from the Last 30 Days\n\nResearch ANY topic across Reddit, X, and the web. Surface what people are actually discussing, recommending, and debating right now.\n\nUse cases:\n\nPrompting: \"photorealistic people in Nano Banana Pro\", \"Midjourney prompts\", \"ChatGPT image generation\" → learn techniques, get copy-paste prompts\nRecommendations: \"best Claude Code skills\", \"top AI tools\" → get a LIST of specific things people mention\nNews: \"what's happening with OpenAI\", \"latest AI announcements\" → current events and updates\nGeneral: any topic you're curious about → understand what the community is saying\nCRITICAL: Parse User Intent\n\nBefore doing anything, parse the user's input for:\n\nTOPIC: What they want to learn about (e.g., \"web app mockups\", \"Claude Code skills\", \"image generation\")\nTARGET TOOL (if specified): Where they'll use the prompts (e.g., \"Nano Banana Pro\", \"ChatGPT\", \"Midjourney\")\nQUERY TYPE: What kind of research they want:\nPROMPTING - \"X prompts\", \"prompting for X\", \"X best practices\" → User wants to learn techniques and get copy-paste prompts\nRECOMMENDATIONS - \"best X\", \"top X\", \"what X should I use\", \"recommended X\" → User wants a LIST of specific things\nNEWS - \"what's happening with X\", \"X news\", \"latest on X\" → User wants current events/updates\nGENERAL - anything else → User wants broad understanding of the topic\n\nCommon patterns:\n\n[topic] for [tool] → \"web mockups for Nano Banana Pro\" → TOOL IS SPECIFIED\n[topic] prompts for [tool] → \"UI design prompts for Midjourney\" → TOOL IS SPECIFIED\nJust [topic] → \"iOS design mockups\" → TOOL NOT SPECIFIED, that's OK\n\"best [topic]\" or \"top [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\"what are the best [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\nIMPORTANT: Do NOT ask about target tool before research.\n\nIf tool is specified in the query, use it\nIf tool is NOT specified, run research first, then ask 
AFTER showing results\n\nStore these variables:\n\nTOPIC = [extracted topic]\nTARGET_TOOL = [extracted tool, or \"unknown\" if not specified]\nQUERY_TYPE = [RECOMMENDATIONS | NEWS | HOW-TO | GENERAL]\nSetup Check\n\nThe skill works in three modes based on available API keys:\n\nFull Mode (both keys): Reddit + X + WebSearch - best results with engagement metrics\nPartial Mode (one key): Reddit-only or X-only + WebSearch\nWeb-Only Mode (no keys): WebSearch only - still useful, but no engagement metrics\n\nAPI keys are OPTIONAL. The skill will work without them using WebSearch fallback.\n\nFirst-Time Setup (Optional but Recommended)\n\nIf the user wants to add API keys for better results:\n\nmkdir -p ~/.config/last30days\ncat > ~/.config/last30days/.env << 'ENVEOF'\n# last30days API Configuration\n# Both keys are optional - skill works with WebSearch fallback\n\n# For Reddit research (uses OpenAI's web_search tool)\nOPENAI_API_KEY=\n\n# For X/Twitter research (uses xAI's x_search tool)\nXAI_API_KEY=\nENVEOF\n\nchmod 600 ~/.config/last30days/.env\necho \"Config created at ~/.config/last30days/.env\"\necho \"Edit to add your API keys for enhanced research.\"\n\n\nDO NOT stop if no keys are configured. Proceed with web-only mode.\n\nResearch Execution\n\nIMPORTANT: The script handles API key detection automatically. 
Run it and check the output to determine mode.\n\nStep 1: Run the research script\n\npython3 ./scripts/last30days.py \"$ARGUMENTS\" --emit=compact 2>&1\n\n\nThe script will automatically:\n\nDetect available API keys\nShow a promo banner if keys are missing (this is intentional marketing)\nRun Reddit/X searches if keys exist\nSignal if WebSearch is needed\n\nStep 2: Check the output mode\n\nThe script output will indicate the mode:\n\n\"Mode: both\" or \"Mode: reddit-only\" or \"Mode: x-only\": Script found results, WebSearch is supplementary\n\"Mode: web-only\": No API keys, Claude must do ALL research via WebSearch\n\nStep 3: Do WebSearch\n\nFor ALL modes, do WebSearch to supplement (or provide all data in web-only mode).\n\nChoose search queries based on QUERY_TYPE:\n\nIf RECOMMENDATIONS (\"best X\", \"top X\", \"what X should I use\"):\n\nSearch for: best {TOPIC} recommendations\nSearch for: {TOPIC} list examples\nSearch for: most popular {TOPIC}\nGoal: Find SPECIFIC NAMES of things, not generic advice\n\nIf NEWS (\"what's happening with X\", \"X news\"):\n\nSearch for: {TOPIC} news 2026\nSearch for: {TOPIC} announcement update\nGoal: Find current events and recent developments\n\nIf PROMPTING (\"X prompts\", \"prompting for X\"):\n\nSearch for: {TOPIC} prompts examples 2026\nSearch for: {TOPIC} techniques tips\nGoal: Find prompting techniques and examples to create copy-paste prompts\n\nIf GENERAL (default):\n\nSearch for: {TOPIC} 2026\nSearch for: {TOPIC} discussion\nGoal: Find what people are actually saying\n\nFor ALL query types:\n\nUSE THE USER'S EXACT TERMINOLOGY - don't substitute or add tech names based on your knowledge\nIf user says \"ChatGPT image prompting\", search for \"ChatGPT image prompting\"\nDo NOT add \"DALL-E\", \"GPT-4o\", or other terms you think are related\nYour knowledge may be outdated - trust the user's terminology\nEXCLUDE reddit.com, x.com, twitter.com (covered by script)\nINCLUDE: blogs, tutorials, docs, news, GitHub repos\nDO 
NOT output \"Sources:\" list - this is noise, we'll show stats at the end\n\nStep 3: Wait for background script to complete Use TaskOutput to get the script results before proceeding to synthesis.\n\nDepth options (passed through from user's command):\n\n--quick → Faster, fewer sources (8-12 each)\n(default) → Balanced (20-30 each)\n--deep → Comprehensive (50-70 Reddit, 40-60 X)\nJudge Agent: Synthesize All Sources\n\nAfter all searches complete, internally synthesize (don't display stats yet):\n\nThe Judge Agent must:\n\nWeight Reddit/X sources HIGHER (they have engagement signals: upvotes, likes)\nWeight WebSearch sources LOWER (no engagement data)\nIdentify patterns that appear across ALL three sources (strongest signals)\nNote any contradictions between sources\nExtract the top 3-5 actionable insights\n\nDo NOT display stats here - they come at the end, right before the invitation.\n\nFIRST: Internalize the Research\n\nCRITICAL: Ground your synthesis in the ACTUAL research content, not your pre-existing knowledge.\n\nRead the research output carefully. Pay attention to:\n\nExact product/tool names mentioned (e.g., if research mentions \"ClawdBot\" or \"@clawdbot\", that's a DIFFERENT product than \"Claude Code\" - don't conflate them)\nSpecific quotes and insights from the sources - use THESE, not generic knowledge\nWhat the sources actually say, not what you assume the topic is about\n\nANTI-PATTERN TO AVOID: If user asks about \"clawdbot skills\" and research returns ClawdBot content (self-hosted AI agent), do NOT synthesize this as \"Claude Code skills\" just because both involve \"skills\". 
Read what the research actually says.\n\nIf QUERY_TYPE = RECOMMENDATIONS\n\nCRITICAL: Extract SPECIFIC NAMES, not generic patterns.\n\nWhen user asks \"best X\" or \"top X\", they want a LIST of specific things:\n\nScan research for specific product names, tool names, project names, skill names, etc.\nCount how many times each is mentioned\nNote which sources recommend each (Reddit thread, X post, blog)\nList them by popularity/mention count\n\nBAD synthesis for \"best Claude Code skills\":\n\n\"Skills are powerful. Keep them under 500 lines. Use progressive disclosure.\"\n\nGOOD synthesis for \"best Claude Code skills\":\n\n\"Most mentioned skills: /commit (5 mentions), remotion skill (4x), git-worktree (3x), /pr (3x). The Remotion announcement got 16K likes on X.\"\n\nFor all QUERY_TYPEs\n\nIdentify from the ACTUAL RESEARCH OUTPUT:\n\nPROMPT FORMAT - Does research recommend JSON, structured params, natural language, keywords? THIS IS CRITICAL.\nThe top 3-5 patterns/techniques that appeared across multiple sources\nSpecific keywords, structures, or approaches mentioned BY THE SOURCES\nCommon pitfalls mentioned BY THE SOURCES\n\nIf research says \"use JSON prompts\" or \"structured prompts\", you MUST deliver prompts in that format later.\n\nTHEN: Show Summary + Invite Vision\n\nCRITICAL: Do NOT output any \"Sources:\" lists. The final display should be clean.\n\nDisplay in this EXACT sequence:\n\nFIRST - What I learned (based on QUERY_TYPE):\n\nIf RECOMMENDATIONS - Show specific things mentioned:\n\n🏆 Most mentioned:\n1. [Specific name] - mentioned {n}x (r/sub, @handle, blog.com)\n2. [Specific name] - mentioned {n}x (sources)\n3. [Specific name] - mentioned {n}x (sources)\n4. [Specific name] - mentioned {n}x (sources)\n5. 
[Specific name] - mentioned {n}x (sources)\n\nNotable mentions: [other specific things with 1-2 mentions]\n\n\nIf PROMPTING/NEWS/GENERAL - Show synthesis and patterns:\n\nWhat I learned:\n\n[2-4 sentences synthesizing key insights FROM THE ACTUAL RESEARCH OUTPUT.]\n\nKEY PATTERNS I'll use:\n1. [Pattern from research]\n2. [Pattern from research]\n3. [Pattern from research]\n\n\nTHEN - Stats (right before invitation):\n\nFor full/partial mode (has API keys):\n\n---\n✅ All agents reported back!\n├─ 🟠 Reddit: {n} threads │ {sum} upvotes │ {sum} comments\n├─ 🔵 X: {n} posts │ {sum} likes │ {sum} reposts\n├─ 🌐 Web: {n} pages │ {domains}\n└─ Top voices: r/{sub1}, r/{sub2} │ @{handle1}, @{handle2} │ {web_author} on {site}\n\n\nFor web-only mode (no API keys):\n\n---\n✅ Research complete!\n├─ 🌐 Web: {n} pages │ {domains}\n└─ Top sources: {author1} on {site1}, {author2} on {site2}\n\n💡 Want engagement metrics? Add API keys to ~/.config/last30days/.env\n   - OPENAI_API_KEY → Reddit (real upvotes & comments)\n   - XAI_API_KEY → X/Twitter (real likes & reposts)\n\n\nLAST - Invitation:\n\n---\nShare your vision for what you want to create and I'll write a thoughtful prompt you can copy-paste directly into {TARGET_TOOL}.\n\n\nUse real numbers from the research output. The patterns should be actual insights from the research, not generic advice.\n\nSELF-CHECK before displaying: Re-read your \"What I learned\" section. Does it match what the research ACTUALLY says? If the research was about ClawdBot (a self-hosted AI agent), your summary should be about ClawdBot, not Claude Code. If you catch yourself projecting your own knowledge instead of the research, rewrite it.\n\nIF TARGET_TOOL is still unknown after showing results, ask NOW (not before research):\n\nWhat tool will you use these prompts with?\n\nOptions:\n1. [Most relevant tool based on research - e.g., if research mentioned Figma/Sketch, offer those]\n2. Nano Banana Pro (image generation)\n3. ChatGPT / Claude (text/code)\n4. 
Other (tell me)\n\n\nIMPORTANT: After displaying this, WAIT for the user to respond. Don't dump generic prompts.\n\nWAIT FOR USER'S VISION\n\nAfter showing the stats summary with your invitation, STOP and wait for the user to tell you what they want to create.\n\nWhen they respond with their vision (e.g., \"I want a landing page mockup for my SaaS app\"), THEN write a single, thoughtful, tailored prompt.\n\nWHEN USER SHARES THEIR VISION: Write ONE Perfect Prompt\n\nBased on what they want to create, write a single, highly-tailored prompt using your research expertise.\n\nCRITICAL: Match the FORMAT the research recommends\n\nIf research says to use a specific prompt FORMAT, YOU MUST USE THAT FORMAT:\n\nResearch says \"JSON prompts\" → Write the prompt AS JSON\nResearch says \"structured parameters\" → Use structured key: value format\nResearch says \"natural language\" → Use conversational prose\nResearch says \"keyword lists\" → Use comma-separated keywords\n\nANTI-PATTERN: Research says \"use JSON prompts with device specs\" but you write plain prose. This defeats the entire purpose of the research.\n\nOutput Format:\nHere's your prompt for {TARGET_TOOL}:\n\n---\n\n[The actual prompt IN THE FORMAT THE RESEARCH RECOMMENDS - if research said JSON, this is JSON. If research said natural language, this is prose. Match what works.]\n\n---\n\nThis uses [brief 1-line explanation of what research insight you applied].\n\nQuality Checklist:\n FORMAT MATCHES RESEARCH - If research said JSON/structured/etc, prompt IS that format\n Directly addresses what the user said they want to create\n Uses specific patterns/keywords discovered in research\n Ready to paste with zero edits (or minimal [PLACEHOLDERS] clearly marked)\n Appropriate length and style for TARGET_TOOL\nIF USER ASKS FOR MORE OPTIONS\n\nOnly if they ask for alternatives or more prompts, provide 2-3 variations. 
Don't dump a prompt pack unless requested.\n\nAFTER EACH PROMPT: Stay in Expert Mode\n\nAfter delivering a prompt, offer to write more:\n\nWant another prompt? Just tell me what you're creating next.\n\nCONTEXT MEMORY\n\nFor the rest of this conversation, remember:\n\nTOPIC: {topic}\nTARGET_TOOL: {tool}\nKEY PATTERNS: {list the top 3-5 patterns you learned}\nRESEARCH FINDINGS: The key facts and insights from the research\n\nCRITICAL: After research is complete, you are now an EXPERT on this topic.\n\nWhen the user asks follow-up questions:\n\nDO NOT run new WebSearches - you already have the research\nAnswer from what you learned - cite the Reddit threads, X posts, and web sources\nIf they ask for a prompt - write one using your expertise\nIf they ask a question - answer it from your research findings\n\nOnly do new research if the user explicitly asks about a DIFFERENT topic.\n\nOutput Summary Footer (After Each Prompt)\n\nAfter delivering a prompt, end with:\n\nFor full/partial mode:\n\n---\n📚 Expert in: {TOPIC} for {TARGET_TOOL}\n📊 Based on: {n} Reddit threads ({sum} upvotes) + {n} X posts ({sum} likes) + {n} web pages\n\nWant another prompt? Just tell me what you're creating next.\n\n\nFor web-only mode:\n\n---\n📚 Expert in: {TOPIC} for {TARGET_TOOL}\n📊 Based on: {n} web pages from {domains}\n\nWant another prompt? Just tell me what you're creating next.\n\n💡 Unlock Reddit & X data: Add API keys to ~/.config/last30days/.env"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/zats/last30days",
    "publisherUrl": "https://clawhub.ai/zats/last30days",
    "owner": "zats",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/last30days",
    "downloadUrl": "https://openagent3.xyz/downloads/last30days",
    "agentUrl": "https://openagent3.xyz/skills/last30days/agent",
    "manifestUrl": "https://openagent3.xyz/skills/last30days/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/last30days/agent.md"
  }
}