{
  "schemaVersion": "1.0",
  "item": {
    "slug": "last30days-skill",
    "name": "Last30days Skill",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/johnsonDevops/last30days-skill",
    "canonicalUrl": "https://clawhub.ai/johnsonDevops/last30days-skill",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/last30days-skill",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=last30days-skill",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "CHANGELOG.md",
      "README.md",
      "SKILL-original.md",
      "SKILL.md",
      "SPEC.md",
      "TASKS.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/last30days-skill"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/last30days-skill",
    "agentPageUrl": "https://openagent3.xyz/skills/last30days-skill/agent",
    "manifestUrl": "https://openagent3.xyz/skills/last30days-skill/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/last30days-skill/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "last30days v2.1: Research Any Topic from the Last 30 Days",
        "body": "Research ANY topic across Reddit, X, YouTube, and the web. Surface what people are actually discussing, recommending, and debating right now."
      },
      {
        "title": "CRITICAL: Parse User Intent",
        "body": "Before doing anything, parse the user's input for:\n\nTOPIC: What they want to learn about (e.g., \"web app mockups\", \"Claude Code skills\", \"image generation\")\nTARGET TOOL (if specified): Where they'll use the prompts (e.g., \"Nano Banana Pro\", \"ChatGPT\", \"Midjourney\")\nQUERY TYPE: What kind of research they want:\n\nPROMPTING - \"X prompts\", \"prompting for X\", \"X best practices\" → User wants to learn techniques and get copy-paste prompts\nRECOMMENDATIONS - \"best X\", \"top X\", \"what X should I use\", \"recommended X\" → User wants a LIST of specific things\nNEWS - \"what's happening with X\", \"X news\", \"latest on X\" → User wants current events/updates\nGENERAL - anything else → User wants broad understanding of the topic\n\nCommon patterns:\n\n[topic] for [tool] → \"web mockups for Nano Banana Pro\" → TOOL IS SPECIFIED\n[topic] prompts for [tool] → \"UI design prompts for Midjourney\" → TOOL IS SPECIFIED\nJust [topic] → \"iOS design mockups\" → TOOL NOT SPECIFIED, that's OK\n\"best [topic]\" or \"top [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\"what are the best [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\nIMPORTANT: Do NOT ask about target tool before research.\n\nIf tool is specified in the query, use it\nIf tool is NOT specified, run research first, then ask AFTER showing results\n\nStore these variables:\n\nTOPIC = [extracted topic]\nTARGET_TOOL = [extracted tool, or \"unknown\" if not specified]\nQUERY_TYPE = [RECOMMENDATIONS | NEWS | HOW-TO | GENERAL]\n\nDISPLAY your parsing to the user. Before running any tools, output:\n\nI'll research {TOPIC} across Reddit, X, and the web to find what's been discussed in the last 30 days.\n\nParsed intent:\n- TOPIC = {TOPIC}\n- TARGET_TOOL = {TARGET_TOOL or \"unknown\"}\n- QUERY_TYPE = {QUERY_TYPE}\n\nResearch typically takes 2-8 minutes (niche topics take longer). 
Starting now.\n\nIf TARGET_TOOL is known, mention it in the intro: \"...to find {QUERY_TYPE}-style content for use in {TARGET_TOOL}.\"\n\nThis text MUST appear before you call any tools. It confirms to the user that you understood their request."
      },
      {
        "title": "Research Execution",
        "body": "Step 1: Run the research script (FOREGROUND — do NOT background this)\n\nCRITICAL: Run this command in the FOREGROUND with a 5-minute timeout. Do NOT use run_in_background. The full output contains Reddit, X, AND YouTube data that you need to read completely.\n\n# Find skill root — works in repo checkout, Claude Code, or Codex install\nfor dir in \\\n  \".\" \\\n  \"${CLAUDE_PLUGIN_ROOT:-}\" \\\n  \"$HOME/.claude/skills/last30days\" \\\n  \"$HOME/.agents/skills/last30days\" \\\n  \"$HOME/.codex/skills/last30days\"; do\n  [ -n \"$dir\" ] && [ -f \"$dir/scripts/last30days.py\" ] && SKILL_ROOT=\"$dir\" && break\ndone\n\nif [ -z \"${SKILL_ROOT:-}\" ]; then\n  echo \"ERROR: Could not find scripts/last30days.py\" >&2\n  exit 1\nfi\n\npython3 \"${SKILL_ROOT}/scripts/last30days.py\" \"$ARGUMENTS\" --emit=compact\n\nUse a timeout of 300000 (5 minutes) on the Bash call. The script typically takes 1-3 minutes.\n\nThe script will automatically:\n\nDetect available API keys\nRun Reddit/X/YouTube searches\nOutput ALL results including YouTube transcripts\n\nRead the ENTIRE output. It contains THREE data sections in this order: Reddit items, X items, and YouTube items. If you miss the YouTube section, you will produce incomplete stats.\n\nYouTube items in the output look like: **{video_id}** (score:N) {channel_name} [N views, N likes] followed by a title, URL, and optional transcript snippet. Count them and include them in your synthesis and stats block."
      },
      {
        "title": "STEP 2: DO WEBSEARCH AFTER SCRIPT COMPLETES",
        "body": "After the script finishes, do WebSearch to supplement with blogs, tutorials, and news.\n\nFor ALL modes, do WebSearch to supplement (or provide all data in web-only mode).\n\nChoose search queries based on QUERY_TYPE:\n\nIf RECOMMENDATIONS (\"best X\", \"top X\", \"what X should I use\"):\n\nSearch for: best {TOPIC} recommendations\nSearch for: {TOPIC} list examples\nSearch for: most popular {TOPIC}\nGoal: Find SPECIFIC NAMES of things, not generic advice\n\nIf NEWS (\"what's happening with X\", \"X news\"):\n\nSearch for: {TOPIC} news 2026\nSearch for: {TOPIC} announcement update\nGoal: Find current events and recent developments\n\nIf PROMPTING (\"X prompts\", \"prompting for X\"):\n\nSearch for: {TOPIC} prompts examples 2026\nSearch for: {TOPIC} techniques tips\nGoal: Find prompting techniques and examples to create copy-paste prompts\n\nIf GENERAL (default):\n\nSearch for: {TOPIC} 2026\nSearch for: {TOPIC} discussion\nGoal: Find what people are actually saying\n\nFor ALL query types:\n\nUSE THE USER'S EXACT TERMINOLOGY - don't substitute or add tech names based on your knowledge\nEXCLUDE reddit.com, x.com, twitter.com (covered by script)\nINCLUDE: blogs, tutorials, docs, news, GitHub repos\nDO NOT output \"Sources:\" list - this is noise, we'll show stats at the end\n\nOptions (passed through from user's command):\n\n--days=N → Look back N days instead of 30 (e.g., --days=7 for weekly roundup)\n--quick → Faster, fewer sources (8-12 each)\n(default) → Balanced (20-30 each)\n--deep → Comprehensive (50-70 Reddit, 40-60 X)"
      },
      {
        "title": "Judge Agent: Synthesize All Sources",
        "body": "After all searches complete, internally synthesize (don't display stats yet):\n\nThe Judge Agent must:\n\nWeight Reddit/X sources HIGHER (they have engagement signals: upvotes, likes)\nWeight YouTube sources HIGH (they have views, likes, and transcript content)\nWeight WebSearch sources LOWER (no engagement data)\nIdentify patterns that appear across ALL sources (strongest signals)\nNote any contradictions between sources\nExtract the top 3-5 actionable insights\n\nDo NOT display stats here - they come at the end, right before the invitation."
      },
      {
        "title": "FIRST: Internalize the Research",
        "body": "CRITICAL: Ground your synthesis in the ACTUAL research content, not your pre-existing knowledge.\n\nRead the research output carefully. Pay attention to:\n\nExact product/tool names mentioned (e.g., if research mentions \"ClawdBot\" or \"@clawdbot\", that's a DIFFERENT product than \"Claude Code\" - don't conflate them)\nSpecific quotes and insights from the sources - use THESE, not generic knowledge\nWhat the sources actually say, not what you assume the topic is about\n\nANTI-PATTERN TO AVOID: If user asks about \"clawdbot skills\" and research returns ClawdBot content (self-hosted AI agent), do NOT synthesize this as \"Claude Code skills\" just because both involve \"skills\". Read what the research actually says."
      },
      {
        "title": "If QUERY_TYPE = RECOMMENDATIONS",
        "body": "CRITICAL: Extract SPECIFIC NAMES, not generic patterns.\n\nWhen user asks \"best X\" or \"top X\", they want a LIST of specific things:\n\nScan research for specific product names, tool names, project names, skill names, etc.\nCount how many times each is mentioned\nNote which sources recommend each (Reddit thread, X post, blog)\nList them by popularity/mention count\n\nBAD synthesis for \"best Claude Code skills\":\n\n\"Skills are powerful. Keep them under 500 lines. Use progressive disclosure.\"\n\nGOOD synthesis for \"best Claude Code skills\":\n\n\"Most mentioned skills: /commit (5 mentions), remotion skill (4x), git-worktree (3x), /pr (3x). The Remotion announcement got 16K likes on X.\""
      },
      {
        "title": "For all QUERY_TYPEs",
        "body": "Identify from the ACTUAL RESEARCH OUTPUT:\n\nPROMPT FORMAT - Does research recommend JSON, structured params, natural language, keywords?\nThe top 3-5 patterns/techniques that appeared across multiple sources\nSpecific keywords, structures, or approaches mentioned BY THE SOURCES\nCommon pitfalls mentioned BY THE SOURCES"
      },
      {
        "title": "THEN: Show Summary + Invite Vision",
        "body": "Display in this EXACT sequence:\n\nFIRST - What I learned (based on QUERY_TYPE):\n\nIf RECOMMENDATIONS - Show specific things mentioned with sources:\n\n🏆 Most mentioned:\n\n[Tool Name] - {n}x mentions\nUse Case: [what it does]\nSources: @handle1, @handle2, r/sub, blog.com\n\n[Tool Name] - {n}x mentions\nUse Case: [what it does]\nSources: @handle3, r/sub2, Complex\n\nNotable mentions: [other specific things with 1-2 mentions]\n\nCRITICAL for RECOMMENDATIONS:\n\nEach item MUST have a \"Sources:\" line with actual @handles from X posts (e.g., @LONGLIVE47, @ByDobson)\nInclude subreddit names (r/hiphopheads) and web sources (Complex, Variety)\nParse @handles from research output and include the highest-engagement ones\nFormat naturally - tables work well for wide terminals, stacked cards for narrow\n\nIf PROMPTING/NEWS/GENERAL - Show synthesis and patterns:\n\nCITATION RULE: Cite sources sparingly to prove research is real.\n\nIn the \"What I learned\" intro: cite 1-2 top sources total, not every sentence\nIn KEY PATTERNS: cite 1 source per pattern, short format: \"per @handle\" or \"per r/sub\"\nDo NOT include engagement metrics in citations (likes, upvotes) - save those for stats box\nDo NOT chain multiple citations: \"per @x, @y, @z\" is too much. 
Pick the strongest one.\n\nCITATION PRIORITY (most to least preferred):\n\n@handles from X — \"per @handle\" (these prove the tool's unique value)\nr/subreddits from Reddit — \"per r/subreddit\"\nYouTube channels — \"per [channel name] on YouTube\" (transcript-backed insights)\nWeb sources — ONLY when Reddit/X/YouTube don't cover that specific fact\n\nThe tool's value is surfacing what PEOPLE are saying, not what journalists wrote.\nWhen both a web article and an X post cover the same fact, cite the X post.\n\nURL FORMATTING: NEVER paste raw URLs in the output.\n\nBAD: \"per https://www.rollingstone.com/music/music-news/kanye-west-bully-1235506094/\"\nGOOD: \"per Rolling Stone\"\nGOOD: \"per Complex\"\nUse the publication name, not the URL. The user doesn't need links — they need clean, readable text.\n\nBAD: \"His album is set for March 20 (per Rolling Stone; Billboard; Complex).\"\nGOOD: \"His album BULLY drops March 20 — fans on X are split on the tracklist, per @honest30bgfan_\"\nGOOD: \"Ye's apology got massive traction on r/hiphopheads\"\nOK (web, only when Reddit/X don't have it): \"The Hellwatt Festival runs July 4-18 at RCF Arena, per Billboard\"\n\nLead with people, not publications. Start each topic with what Reddit/X\nusers are saying/feeling, then add web context only if needed. The user came\nhere for the conversation, not the press release.\n\nWhat I learned:\n\n**{Topic 1}** — [1-2 sentences about what people are saying, per @handle or r/sub]\n\n**{Topic 2}** — [1-2 sentences, per @handle or r/sub]\n\n**{Topic 3}** — [1-2 sentences, per @handle or r/sub]\n\nKEY PATTERNS from the research:\n1. [Pattern] — per @handle\n2. [Pattern] — per r/sub\n3. 
[Pattern] — per @handle\n\nTHEN - Stats (right before invitation):\n\nCRITICAL: Calculate actual totals from the research output.\n\nCount posts/threads from each section\nSum engagement: parse [Xlikes, Yrt] from each X post, [Xpts, Ycmt] from Reddit\nIdentify top voices: highest-engagement @handles from X, most active subreddits\n\nCopy this EXACTLY, replacing only the {placeholders}:\n\n---\n✅ All agents reported back!\n├─ 🟠 Reddit: {N} threads │ {N} upvotes │ {N} comments\n├─ 🔵 X: {N} posts │ {N} likes │ {N} reposts\n├─ 🔴 YouTube: {N} videos │ {N} views │ {N} with transcripts\n├─ 🌐 Web: {N} pages (supplementary)\n└─ 🗣️ Top voices: @{handle1} ({N} likes), @{handle2} │ r/{sub1}, r/{sub2}\n---\n\nIf Reddit returned 0 threads, write: \"├─ 🟠 Reddit: 0 threads (no results this cycle)\"\nIf YouTube returned 0 videos or yt-dlp is not installed, omit the YouTube line entirely.\nNEVER use plain text dashes (-) or pipe (|). ALWAYS use ├─ └─ │ and the emoji.\n\nSELF-CHECK before displaying: Re-read your \"What I learned\" section. Does it match what the research ACTUALLY says? If you catch yourself projecting your own knowledge instead of the research, rewrite it.\n\nLAST - Invitation (adapt to QUERY_TYPE):\n\nCRITICAL: Every invitation MUST include 2-3 specific example suggestions based on what you ACTUALLY learned from the research. Don't be generic — show the user you absorbed the content by referencing real things from the results.\n\nIf QUERY_TYPE = PROMPTING:\n\n---\nI'm now an expert on {TOPIC} for {TARGET_TOOL}. What do you want to make? For example:\n- [specific idea based on popular technique from research]\n- [specific idea based on trending style/approach from research]\n- [specific idea riffing on what people are actually creating]\n\nJust describe your vision and I'll write a prompt you can paste straight into {TARGET_TOOL}.\n\nIf QUERY_TYPE = RECOMMENDATIONS:\n\n---\nI'm now an expert on {TOPIC}. Want me to go deeper? 
For example:\n- [Compare specific item A vs item B from the results]\n- [Explain why item C is trending right now]\n- [Help you get started with item D]\n\nIf QUERY_TYPE = NEWS:\n\n---\nI'm now an expert on {TOPIC}. Some things you could ask:\n- [Specific follow-up question about the biggest story]\n- [Question about implications of a key development]\n- [Question about what might happen next based on current trajectory]\n\nIf QUERY_TYPE = GENERAL:\n\n---\nI'm now an expert on {TOPIC}. Some things I can help with:\n- [Specific question based on the most discussed aspect]\n- [Specific creative/practical application of what you learned]\n- [Deeper dive into a pattern or debate from the research]\n\nExample invitations (to show the quality bar):\n\nFor /last30days nano banana pro prompts for Gemini:\n\nI'm now an expert on Nano Banana Pro for Gemini. What do you want to make? For example:\n\nPhotorealistic product shots with natural lighting (the most requested style right now)\nLogo designs with embedded text (Gemini's new strength per the research)\nMulti-reference style transfer from a mood board\n\nJust describe your vision and I'll write a prompt you can paste straight into Gemini.\n\nFor /last30days kanye west (GENERAL):\n\nI'm now an expert on Kanye West. Some things I can help with:\n\nWhat's the real story behind the apology letter — genuine or PR move?\nBreak down the BULLY tracklist reactions and what fans are expecting\nCompare how Reddit vs X are reacting to the Bianca narrative\n\nFor /last30days war in Iran (NEWS):\n\nI'm now an expert on the Iran situation. Some things you could ask:\n\nWhat are the realistic escalation scenarios from here?\nHow is this playing differently in US vs international media?\nWhat's the economic impact on oil markets so far?"
      },
      {
        "title": "WAIT FOR USER'S RESPONSE",
        "body": "After showing the stats summary with your invitation, STOP and wait for the user to respond."
      },
      {
        "title": "WHEN USER RESPONDS",
        "body": "Read their response and match the intent:\n\nIf they ask a QUESTION about the topic → Answer from your research (no new searches, no prompt)\nIf they ask to GO DEEPER on a subtopic → Elaborate using your research findings\nIf they describe something they want to CREATE → Write ONE perfect prompt (see below)\nIf they ask for a PROMPT explicitly → Write ONE perfect prompt (see below)\n\nOnly write a prompt when the user wants one. Don't force a prompt on someone who asked \"what could happen next with Iran.\""
      },
      {
        "title": "Writing a Prompt",
        "body": "When the user wants a prompt, write a single, highly-tailored prompt using your research expertise."
      },
      {
        "title": "CRITICAL: Match the FORMAT the research recommends",
        "body": "If research says to use a specific prompt FORMAT, YOU MUST USE THAT FORMAT.\n\nANTI-PATTERN: Research says \"use JSON prompts with device specs\" but you write plain prose. This defeats the entire purpose of the research."
      },
      {
        "title": "Quality Checklist (run before delivering):",
        "body": "FORMAT MATCHES RESEARCH - If research said JSON/structured/etc, prompt IS that format\n Directly addresses what the user said they want to create\n Uses specific patterns/keywords discovered in research\n Ready to paste with zero edits (or minimal [PLACEHOLDERS] clearly marked)\n Appropriate length and style for TARGET_TOOL"
      },
      {
        "title": "Output Format:",
        "body": "Here's your prompt for {TARGET_TOOL}:\n\n---\n\n[The actual prompt IN THE FORMAT THE RESEARCH RECOMMENDS]\n\n---\n\nThis uses [brief 1-line explanation of what research insight you applied]."
      },
      {
        "title": "IF USER ASKS FOR MORE OPTIONS",
        "body": "Only if they ask for alternatives or more prompts, provide 2-3 variations. Don't dump a prompt pack unless requested."
      },
      {
        "title": "AFTER EACH PROMPT: Stay in Expert Mode",
        "body": "After delivering a prompt, offer to write more:\n\nWant another prompt? Just tell me what you're creating next."
      },
      {
        "title": "CONTEXT MEMORY",
        "body": "For the rest of this conversation, remember:\n\nTOPIC: {topic}\nTARGET_TOOL: {tool}\nKEY PATTERNS: {list the top 3-5 patterns you learned}\nRESEARCH FINDINGS: The key facts and insights from the research\n\nCRITICAL: After research is complete, you are now an EXPERT on this topic.\n\nWhen the user asks follow-up questions:\n\nDO NOT run new WebSearches - you already have the research\nAnswer from what you learned - cite the Reddit threads, X posts, and web sources\nIf they ask a question - answer it from your research findings\nIf they ask for a prompt - write one using your expertise\n\nOnly do new research if the user explicitly asks about a DIFFERENT topic."
      },
      {
        "title": "Output Summary Footer (After Each Prompt)",
        "body": "After delivering a prompt, end with:\n\n---\n📚 Expert in: {TOPIC} for {TARGET_TOOL}\n📊 Based on: {n} Reddit threads ({sum} upvotes) + {n} X posts ({sum} likes) + {n} YouTube videos ({sum} views) + {n} web pages\n\nWant another prompt? Just tell me what you're creating next."
      },
      {
        "title": "Security & Permissions",
        "body": "What this skill does:\n\nSends search queries to OpenAI's Responses API (api.openai.com) for Reddit discovery\nSends search queries to Twitter's GraphQL API (via browser cookie auth) or xAI's API (api.x.ai) for X search\nRuns yt-dlp locally for YouTube search and transcript extraction (no API key, public data)\nOptionally sends search queries to Brave Search API, Parallel AI API, or OpenRouter API for web search\nFetches public Reddit thread data from reddit.com for engagement metrics\nStores research findings in local SQLite database (watchlist mode only)\n\nWhat this skill does NOT do:\n\nDoes not post, like, or modify content on any platform\nDoes not access your Reddit, X, or YouTube accounts\nDoes not share API keys between providers (OpenAI key only goes to api.openai.com, etc.)\nDoes not log, cache, or write API keys to output files\nDoes not send data to any endpoint not listed above\nCannot be invoked autonomously by the agent (disable-model-invocation: true)\n\nBundled scripts: scripts/last30days.py (main research engine), scripts/lib/ (search, enrichment, rendering modules), scripts/lib/vendor/bird-search/ (vendored X search client, MIT licensed)\n\nReview scripts before first use to verify behavior."
      }
    ],
    "body": "last30days v2.1: Research Any Topic from the Last 30 Days\n\nResearch ANY topic across Reddit, X, YouTube, and the web. Surface what people are actually discussing, recommending, and debating right now.\n\nCRITICAL: Parse User Intent\n\nBefore doing anything, parse the user's input for:\n\nTOPIC: What they want to learn about (e.g., \"web app mockups\", \"Claude Code skills\", \"image generation\")\nTARGET TOOL (if specified): Where they'll use the prompts (e.g., \"Nano Banana Pro\", \"ChatGPT\", \"Midjourney\")\nQUERY TYPE: What kind of research they want:\nPROMPTING - \"X prompts\", \"prompting for X\", \"X best practices\" → User wants to learn techniques and get copy-paste prompts\nRECOMMENDATIONS - \"best X\", \"top X\", \"what X should I use\", \"recommended X\" → User wants a LIST of specific things\nNEWS - \"what's happening with X\", \"X news\", \"latest on X\" → User wants current events/updates\nGENERAL - anything else → User wants broad understanding of the topic\n\nCommon patterns:\n\n[topic] for [tool] → \"web mockups for Nano Banana Pro\" → TOOL IS SPECIFIED\n[topic] prompts for [tool] → \"UI design prompts for Midjourney\" → TOOL IS SPECIFIED\nJust [topic] → \"iOS design mockups\" → TOOL NOT SPECIFIED, that's OK\n\"best [topic]\" or \"top [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\"what are the best [topic]\" → QUERY_TYPE = RECOMMENDATIONS\n\nIMPORTANT: Do NOT ask about target tool before research.\n\nIf tool is specified in the query, use it\nIf tool is NOT specified, run research first, then ask AFTER showing results\n\nStore these variables:\n\nTOPIC = [extracted topic]\nTARGET_TOOL = [extracted tool, or \"unknown\" if not specified]\nQUERY_TYPE = [RECOMMENDATIONS | NEWS | HOW-TO | GENERAL]\n\nDISPLAY your parsing to the user. 
Before running any tools, output:\n\nI'll research {TOPIC} across Reddit, X, and the web to find what's been discussed in the last 30 days.\n\nParsed intent:\n- TOPIC = {TOPIC}\n- TARGET_TOOL = {TARGET_TOOL or \"unknown\"}\n- QUERY_TYPE = {QUERY_TYPE}\n\nResearch typically takes 2-8 minutes (niche topics take longer). Starting now.\n\n\nIf TARGET_TOOL is known, mention it in the intro: \"...to find {QUERY_TYPE}-style content for use in {TARGET_TOOL}.\"\n\nThis text MUST appear before you call any tools. It confirms to the user that you understood their request.\n\nResearch Execution\n\nStep 1: Run the research script (FOREGROUND — do NOT background this)\n\nCRITICAL: Run this command in the FOREGROUND with a 5-minute timeout. Do NOT use run_in_background. The full output contains Reddit, X, AND YouTube data that you need to read completely.\n\n# Find skill root — works in repo checkout, Claude Code, or Codex install\nfor dir in \\\n  \".\" \\\n  \"${CLAUDE_PLUGIN_ROOT:-}\" \\\n  \"$HOME/.claude/skills/last30days\" \\\n  \"$HOME/.agents/skills/last30days\" \\\n  \"$HOME/.codex/skills/last30days\"; do\n  [ -n \"$dir\" ] && [ -f \"$dir/scripts/last30days.py\" ] && SKILL_ROOT=\"$dir\" && break\ndone\n\nif [ -z \"${SKILL_ROOT:-}\" ]; then\n  echo \"ERROR: Could not find scripts/last30days.py\" >&2\n  exit 1\nfi\n\npython3 \"${SKILL_ROOT}/scripts/last30days.py\" \"$ARGUMENTS\" --emit=compact\n\n\nUse a timeout of 300000 (5 minutes) on the Bash call. The script typically takes 1-3 minutes.\n\nThe script will automatically:\n\nDetect available API keys\nRun Reddit/X/YouTube searches\nOutput ALL results including YouTube transcripts\n\nRead the ENTIRE output. It contains THREE data sections in this order: Reddit items, X items, and YouTube items. If you miss the YouTube section, you will produce incomplete stats.\n\nYouTube items in the output look like: **{video_id}** (score:N) {channel_name} [N views, N likes] followed by a title, URL, and optional transcript snippet. 
Count them and include them in your synthesis and stats block.\n\nSTEP 2: DO WEBSEARCH AFTER SCRIPT COMPLETES\n\nAfter the script finishes, do WebSearch to supplement with blogs, tutorials, and news.\n\nFor ALL modes, do WebSearch to supplement (or provide all data in web-only mode).\n\nChoose search queries based on QUERY_TYPE:\n\nIf RECOMMENDATIONS (\"best X\", \"top X\", \"what X should I use\"):\n\nSearch for: best {TOPIC} recommendations\nSearch for: {TOPIC} list examples\nSearch for: most popular {TOPIC}\nGoal: Find SPECIFIC NAMES of things, not generic advice\n\nIf NEWS (\"what's happening with X\", \"X news\"):\n\nSearch for: {TOPIC} news 2026\nSearch for: {TOPIC} announcement update\nGoal: Find current events and recent developments\n\nIf PROMPTING (\"X prompts\", \"prompting for X\"):\n\nSearch for: {TOPIC} prompts examples 2026\nSearch for: {TOPIC} techniques tips\nGoal: Find prompting techniques and examples to create copy-paste prompts\n\nIf GENERAL (default):\n\nSearch for: {TOPIC} 2026\nSearch for: {TOPIC} discussion\nGoal: Find what people are actually saying\n\nFor ALL query types:\n\nUSE THE USER'S EXACT TERMINOLOGY - don't substitute or add tech names based on your knowledge\nEXCLUDE reddit.com, x.com, twitter.com (covered by script)\nINCLUDE: blogs, tutorials, docs, news, GitHub repos\nDO NOT output \"Sources:\" list - this is noise, we'll show stats at the end\n\nOptions (passed through from user's command):\n\n--days=N → Look back N days instead of 30 (e.g., --days=7 for weekly roundup)\n--quick → Faster, fewer sources (8-12 each)\n(default) → Balanced (20-30 each)\n--deep → Comprehensive (50-70 Reddit, 40-60 X)\nJudge Agent: Synthesize All Sources\n\nAfter all searches complete, internally synthesize (don't display stats yet):\n\nThe Judge Agent must:\n\nWeight Reddit/X sources HIGHER (they have engagement signals: upvotes, likes)\nWeight YouTube sources HIGH (they have views, likes, and transcript content)\nWeight WebSearch sources LOWER (no 
engagement data)\nIdentify patterns that appear across ALL sources (strongest signals)\nNote any contradictions between sources\nExtract the top 3-5 actionable insights\n\nDo NOT display stats here - they come at the end, right before the invitation.\n\nFIRST: Internalize the Research\n\nCRITICAL: Ground your synthesis in the ACTUAL research content, not your pre-existing knowledge.\n\nRead the research output carefully. Pay attention to:\n\nExact product/tool names mentioned (e.g., if research mentions \"ClawdBot\" or \"@clawdbot\", that's a DIFFERENT product than \"Claude Code\" - don't conflate them)\nSpecific quotes and insights from the sources - use THESE, not generic knowledge\nWhat the sources actually say, not what you assume the topic is about\n\nANTI-PATTERN TO AVOID: If user asks about \"clawdbot skills\" and research returns ClawdBot content (self-hosted AI agent), do NOT synthesize this as \"Claude Code skills\" just because both involve \"skills\". Read what the research actually says.\n\nIf QUERY_TYPE = RECOMMENDATIONS\n\nCRITICAL: Extract SPECIFIC NAMES, not generic patterns.\n\nWhen user asks \"best X\" or \"top X\", they want a LIST of specific things:\n\nScan research for specific product names, tool names, project names, skill names, etc.\nCount how many times each is mentioned\nNote which sources recommend each (Reddit thread, X post, blog)\nList them by popularity/mention count\n\nBAD synthesis for \"best Claude Code skills\":\n\n\"Skills are powerful. Keep them under 500 lines. Use progressive disclosure.\"\n\nGOOD synthesis for \"best Claude Code skills\":\n\n\"Most mentioned skills: /commit (5 mentions), remotion skill (4x), git-worktree (3x), /pr (3x). 
The Remotion announcement got 16K likes on X.\"\n\nFor all QUERY_TYPEs\n\nIdentify from the ACTUAL RESEARCH OUTPUT:\n\nPROMPT FORMAT - Does research recommend JSON, structured params, natural language, keywords?\nThe top 3-5 patterns/techniques that appeared across multiple sources\nSpecific keywords, structures, or approaches mentioned BY THE SOURCES\nCommon pitfalls mentioned BY THE SOURCES\nTHEN: Show Summary + Invite Vision\n\nDisplay in this EXACT sequence:\n\nFIRST - What I learned (based on QUERY_TYPE):\n\nIf RECOMMENDATIONS - Show specific things mentioned with sources:\n\n🏆 Most mentioned:\n\n[Tool Name] - {n}x mentions\nUse Case: [what it does]\nSources: @handle1, @handle2, r/sub, blog.com\n\n[Tool Name] - {n}x mentions\nUse Case: [what it does]\nSources: @handle3, r/sub2, Complex\n\nNotable mentions: [other specific things with 1-2 mentions]\n\n\nCRITICAL for RECOMMENDATIONS:\n\nEach item MUST have a \"Sources:\" line with actual @handles from X posts (e.g., @LONGLIVE47, @ByDobson)\nInclude subreddit names (r/hiphopheads) and web sources (Complex, Variety)\nParse @handles from research output and include the highest-engagement ones\nFormat naturally - tables work well for wide terminals, stacked cards for narrow\n\nIf PROMPTING/NEWS/GENERAL - Show synthesis and patterns:\n\nCITATION RULE: Cite sources sparingly to prove research is real.\n\nIn the \"What I learned\" intro: cite 1-2 top sources total, not every sentence\nIn KEY PATTERNS: cite 1 source per pattern, short format: \"per @handle\" or \"per r/sub\"\nDo NOT include engagement metrics in citations (likes, upvotes) - save those for stats box\nDo NOT chain multiple citations: \"per @x, @y, @z\" is too much. 
Pick the strongest one.\n\nCITATION PRIORITY (most to least preferred):\n\n@handles from X — \"per @handle\" (these prove the tool's unique value)\nr/subreddits from Reddit — \"per r/subreddit\"\nYouTube channels — \"per [channel name] on YouTube\" (transcript-backed insights)\nWeb sources — ONLY when Reddit/X/YouTube don't cover that specific fact\n\nThe tool's value is surfacing what PEOPLE are saying, not what journalists wrote. When both a web article and an X post cover the same fact, cite the X post.\n\nURL FORMATTING: NEVER paste raw URLs in the output.\n\nBAD: \"per https://www.rollingstone.com/music/music-news/kanye-west-bully-1235506094/\"\nGOOD: \"per Rolling Stone\"\nGOOD: \"per Complex\" Use the publication name, not the URL. The user doesn't need links — they need clean, readable text.\n\nBAD: \"His album is set for March 20 (per Rolling Stone; Billboard; Complex).\" GOOD: \"His album BULLY drops March 20 — fans on X are split on the tracklist, per @honest30bgfan_\" GOOD: \"Ye's apology got massive traction on r/hiphopheads\" OK (web, only when Reddit/X don't have it): \"The Hellwatt Festival runs July 4-18 at RCF Arena, per Billboard\"\n\nLead with people, not publications. Start each topic with what Reddit/X users are saying/feeling, then add web context only if needed. The user came here for the conversation, not the press release.\n\nWhat I learned:\n\n**{Topic 1}** — [1-2 sentences about what people are saying, per @handle or r/sub]\n\n**{Topic 2}** — [1-2 sentences, per @handle or r/sub]\n\n**{Topic 3}** — [1-2 sentences, per @handle or r/sub]\n\nKEY PATTERNS from the research:\n1. [Pattern] — per @handle\n2. [Pattern] — per r/sub\n3. 
[Pattern] — per @handle\n\n\nTHEN - Stats (right before invitation):\n\nCRITICAL: Calculate actual totals from the research output.\n\nCount posts/threads from each section\nSum engagement: parse [Xlikes, Yrt] from each X post, [Xpts, Ycmt] from Reddit\nIdentify top voices: highest-engagement @handles from X, most active subreddits\n\nCopy this EXACTLY, replacing only the {placeholders}:\n\n---\n✅ All agents reported back!\n├─ 🟠 Reddit: {N} threads │ {N} upvotes │ {N} comments\n├─ 🔵 X: {N} posts │ {N} likes │ {N} reposts\n├─ 🔴 YouTube: {N} videos │ {N} views │ {N} with transcripts\n├─ 🌐 Web: {N} pages (supplementary)\n└─ 🗣️ Top voices: @{handle1} ({N} likes), @{handle2} │ r/{sub1}, r/{sub2}\n---\n\n\nIf Reddit returned 0 threads, write: \"├─ 🟠 Reddit: 0 threads (no results this cycle)\" If YouTube returned 0 videos or yt-dlp is not installed, omit the YouTube line entirely. NEVER use plain text dashes (-) or pipe (|). ALWAYS use ├─ └─ │ and the emoji.\n\nSELF-CHECK before displaying: Re-read your \"What I learned\" section. Does it match what the research ACTUALLY says? If you catch yourself projecting your own knowledge instead of the research, rewrite it.\n\nLAST - Invitation (adapt to QUERY_TYPE):\n\nCRITICAL: Every invitation MUST include 2-3 specific example suggestions based on what you ACTUALLY learned from the research. Don't be generic — show the user you absorbed the content by referencing real things from the results.\n\nIf QUERY_TYPE = PROMPTING:\n\n---\nI'm now an expert on {TOPIC} for {TARGET_TOOL}. What do you want to make? For example:\n- [specific idea based on popular technique from research]\n- [specific idea based on trending style/approach from research]\n- [specific idea riffing on what people are actually creating]\n\nJust describe your vision and I'll write a prompt you can paste straight into {TARGET_TOOL}.\n\n\nIf QUERY_TYPE = RECOMMENDATIONS:\n\n---\nI'm now an expert on {TOPIC}. Want me to go deeper? 
For example:\n- [Compare specific item A vs item B from the results]\n- [Explain why item C is trending right now]\n- [Help you get started with item D]\n\n\nIf QUERY_TYPE = NEWS:\n\n---\nI'm now an expert on {TOPIC}. Some things you could ask:\n- [Specific follow-up question about the biggest story]\n- [Question about implications of a key development]\n- [Question about what might happen next based on current trajectory]\n\n\nIf QUERY_TYPE = GENERAL:\n\n---\nI'm now an expert on {TOPIC}. Some things I can help with:\n- [Specific question based on the most discussed aspect]\n- [Specific creative/practical application of what you learned]\n- [Deeper dive into a pattern or debate from the research]\n\n\nExample invitations (to show the quality bar):\n\nFor /last30days nano banana pro prompts for Gemini:\n\nI'm now an expert on Nano Banana Pro for Gemini. What do you want to make? For example:\n\nPhotorealistic product shots with natural lighting (the most requested style right now)\nLogo designs with embedded text (Gemini's new strength per the research)\nMulti-reference style transfer from a mood board\n\nJust describe your vision and I'll write a prompt you can paste straight into Gemini.\n\nFor /last30days kanye west (GENERAL):\n\nI'm now an expert on Kanye West. Some things I can help with:\n\nWhat's the real story behind the apology letter — genuine or PR move?\nBreak down the BULLY tracklist reactions and what fans are expecting\nCompare how Reddit vs X are reacting to the Bianca narrative\n\nFor /last30days war in Iran (NEWS):\n\nI'm now an expert on the Iran situation. 
Some things you could ask:\n\nWhat are the realistic escalation scenarios from here?\nHow is this playing differently in US vs international media?\nWhat's the economic impact on oil markets so far?\nWAIT FOR USER'S RESPONSE\n\nAfter showing the stats summary with your invitation, STOP and wait for the user to respond.\n\nWHEN USER RESPONDS\n\nRead their response and match the intent:\n\nIf they ask a QUESTION about the topic → Answer from your research (no new searches, no prompt)\nIf they ask to GO DEEPER on a subtopic → Elaborate using your research findings\nIf they describe something they want to CREATE → Write ONE perfect prompt (see below)\nIf they ask for a PROMPT explicitly → Write ONE perfect prompt (see below)\n\nOnly write a prompt when the user wants one. Don't force a prompt on someone who asked \"what could happen next with Iran.\"\n\nWriting a Prompt\n\nWhen the user wants a prompt, write a single, highly-tailored prompt using your research expertise.\n\nCRITICAL: Match the FORMAT the research recommends\n\nIf research says to use a specific prompt FORMAT, YOU MUST USE THAT FORMAT.\n\nANTI-PATTERN: Research says \"use JSON prompts with device specs\" but you write plain prose. This defeats the entire purpose of the research.\n\nQuality Checklist (run before delivering):\n- FORMAT MATCHES RESEARCH - If research said JSON/structured/etc, prompt IS that format\n- Directly addresses what the user said they want to create\n- Uses specific patterns/keywords discovered in research\n- Ready to paste with zero edits (or minimal [PLACEHOLDERS] clearly marked)\n- Appropriate length and style for TARGET_TOOL\nOutput Format:\nHere's your prompt for {TARGET_TOOL}:\n\n---\n\n[The actual prompt IN THE FORMAT THE RESEARCH RECOMMENDS]\n\n---\n\nThis uses [brief 1-line explanation of what research insight you applied].\n\nIF USER ASKS FOR MORE OPTIONS\n\nOnly if they ask for alternatives or more prompts, provide 2-3 variations. 
Don't dump a prompt pack unless requested.\n\nAFTER EACH PROMPT: Stay in Expert Mode\n\nAfter delivering a prompt, offer to write more:\n\nWant another prompt? Just tell me what you're creating next.\n\nCONTEXT MEMORY\n\nFor the rest of this conversation, remember:\n\nTOPIC: {topic}\nTARGET_TOOL: {tool}\nKEY PATTERNS: {list the top 3-5 patterns you learned}\nRESEARCH FINDINGS: The key facts and insights from the research\n\nCRITICAL: After research is complete, you are now an EXPERT on this topic.\n\nWhen the user asks follow-up questions:\n\nDO NOT run new WebSearches - you already have the research\nAnswer from what you learned - cite the Reddit threads, X posts, and web sources\nIf they ask a question - answer it from your research findings\nIf they ask for a prompt - write one using your expertise\n\nOnly do new research if the user explicitly asks about a DIFFERENT topic.\n\nOutput Summary Footer (After Each Prompt)\n\nAfter delivering a prompt, end with:\n\n---\n📚 Expert in: {TOPIC} for {TARGET_TOOL}\n📊 Based on: {n} Reddit threads ({sum} upvotes) + {n} X posts ({sum} likes) + {n} YouTube videos ({sum} views) + {n} web pages\n\nWant another prompt? 
Just tell me what you're creating next.\n\nSecurity & Permissions\n\nWhat this skill does:\n\nSends search queries to OpenAI's Responses API (api.openai.com) for Reddit discovery\nSends search queries to Twitter's GraphQL API (via browser cookie auth) or xAI's API (api.x.ai) for X search\nRuns yt-dlp locally for YouTube search and transcript extraction (no API key, public data)\nOptionally sends search queries to Brave Search API, Parallel AI API, or OpenRouter API for web search\nFetches public Reddit thread data from reddit.com for engagement metrics\nStores research findings in local SQLite database (watchlist mode only)\n\nWhat this skill does NOT do:\n\nDoes not post, like, or modify content on any platform\nDoes not access your Reddit, X, or YouTube accounts\nDoes not share API keys between providers (OpenAI key only goes to api.openai.com, etc.)\nDoes not log, cache, or write API keys to output files\nDoes not send data to any endpoint not listed above\nCannot be invoked autonomously by the agent (disable-model-invocation: true)\n\nBundled scripts: scripts/last30days.py (main research engine), scripts/lib/ (search, enrichment, rendering modules), scripts/lib/vendor/bird-search/ (vendored X search client, MIT licensed)\n\nReview scripts before first use to verify behavior."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/johnsonDevops/last30days-skill",
    "publisherUrl": "https://clawhub.ai/johnsonDevops/last30days-skill",
    "owner": "johnsonDevops",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/last30days-skill",
    "downloadUrl": "https://openagent3.xyz/downloads/last30days-skill",
    "agentUrl": "https://openagent3.xyz/skills/last30days-skill/agent",
    "manifestUrl": "https://openagent3.xyz/skills/last30days-skill/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/last30days-skill/agent.md"
  }
}