{
  "schemaVersion": "1.0",
  "item": {
    "slug": "decodo-scraper-skill",
    "name": "Decodo Web Scraper",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/DonatasDecodo/decodo-scraper-skill",
    "canonicalUrl": "https://clawhub.ai/DonatasDecodo/decodo-scraper-skill",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/decodo-scraper-skill",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=decodo-scraper-skill",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md",
      "requirements.txt",
      "tools/scrape.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/decodo-scraper-skill"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/decodo-scraper-skill",
    "agentPageUrl": "https://openagent3.xyz/skills/decodo-scraper-skill/agent",
    "manifestUrl": "https://openagent3.xyz/skills/decodo-scraper-skill/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/decodo-scraper-skill/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Decodo Scraper OpenClaw Skill",
        "body": "Use this skill to search Google, scrape any URL, or fetch YouTube subtitles via the Decodo Web Scraping API. Search outputs a JSON object of result sections; Scrape URL outputs plain markdown; Amazon and Amazon search output parsed product-page or search results (JSON). Amazon search uses --query. YouTube subtitles outputs transcript/subtitles. Reddit post and Reddit subreddit output post/listing content (JSON).\n\nAuthentication: Set DECODO_AUTH_TOKEN (Basic auth token from Decodo Dashboard → Scraping APIs) in your environment or in a .env file in the repo root.\n\nErrors: On failure the script writes a JSON error to stderr and exits with code 1."
      },
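      {
        "title": "Setup sketch (illustrative)",
        "body": "A minimal setup sketch, assuming only what the docs above state: the script reads DECODO_AUTH_TOKEN from the environment or from a .env file in the repo root, and Python dependencies are listed in requirements.txt. The token value below is a placeholder.\n\npip install -r requirements.txt\n\n# .env in the repo root\nDECODO_AUTH_TOKEN=YOUR_BASIC_AUTH_TOKEN\n\n# or export it for the current shell instead\nexport DECODO_AUTH_TOKEN=YOUR_BASIC_AUTH_TOKEN"
      },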
      {
        "title": "1. Search Google",
        "body": "Use this to find URLs, answers, or structured search results. The API returns a JSON object whose results key contains several sections (not all may be present for every query):\n\nSectionDescriptionorganicMain search results (titles, links, snippets).ai_overviewsAI-generated overviews or summaries when Google shows them.paidPaid/sponsored results (ads).related_questions“People also ask”–style questions and answers.related_searchesSuggested related search queries.discussions_and_forumsForum or discussion results (e.g. Reddit, Stack Exchange).\n\nThe script outputs only the inner results object (these sections); pagination info (page, last_visible_page, parse_status_code) is not included.\n\nCommand:\n\npython3 tools/scrape.py --target google_search --query \"your search query\"\n\nExamples:\n\npython3 tools/scrape.py --target google_search --query \"best laptops 2025\"\npython3 tools/scrape.py --target google_search --query \"python requests tutorial\"\n\nOptional: --geo us or --locale en for location/language."
      },
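      {
        "title": "Filtering search output (illustrative)",
        "body": "A small sketch, assuming jq is installed: because the script prints the inner results object as JSON, a single documented section such as organic can be pulled out directly on the command line.\n\npython3 tools/scrape.py --target google_search --query \"best laptops 2025\" | jq '.organic'"
      },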
      {
        "title": "2. Scrape URL",
        "body": "Use this to get the content of a specific web page. By default the API returns content as Markdown (cleaner for LLMs and lower token usage).\n\nCommand:\n\npython3 tools/scrape.py --target universal --url \"https://example.com\"\n\nExamples:\n\npython3 tools/scrape.py --target universal --url \"https://example.com\"\npython3 tools/scrape.py --target universal --url \"https://news.ycombinator.com/\""
      },
      {
        "title": "3. Amazon product page",
        "body": "Use this to get parsed data from an Amazon product (or other Amazon) page. Pass the product page URL as --url. The script sends parse: true and outputs the inner results object (e.g. ads, product details, etc.).\n\nCommand:\n\npython3 tools/scrape.py --target amazon --url \"https://www.amazon.com/dp/PRODUCT_ID\"\n\nExamples:\n\npython3 tools/scrape.py --target amazon --url \"https://www.amazon.com/dp/B09H74FXNW\""
      },
      {
        "title": "4. Amazon search",
        "body": "Use this to search Amazon and get parsed results (search results list, delivery_postcode, etc.). Pass the search query as --query.\n\nCommand:\n\npython3 tools/scrape.py --target amazon_search --query \"your search query\"\n\nExamples:\n\npython3 tools/scrape.py --target amazon_search --query \"laptop\""
      },
      {
        "title": "5. YouTube subtitles",
        "body": "Use this to get subtitles/transcript for a YouTube video. Pass the video ID (e.g. from youtube.com/watch?v=VIDEO_ID) as --query.\n\nCommand:\n\npython3 tools/scrape.py --target youtube_subtitles --query \"VIDEO_ID\"\n\nExamples:\n\npython3 tools/scrape.py --target youtube_subtitles --query \"dFu9aKJoqGg\""
      },
      {
        "title": "6. Reddit post",
        "body": "Use this to get the content of a Reddit post (thread). Pass the full post URL as --url.\n\nCommand:\n\npython3 tools/scrape.py --target reddit_post --url \"https://www.reddit.com/r/SUBREDDIT/comments/ID/...\"\n\nExamples:\n\npython3 tools/scrape.py --target reddit_post --url \"https://www.reddit.com/r/nba/comments/17jrqc5/serious_next_day_thread_postgame_discussion/\""
      },
      {
        "title": "7. Reddit subreddit",
        "body": "Use this to get the listing (posts) of a Reddit subreddit. Pass the subreddit URL as --url.\n\nCommand:\n\npython3 tools/scrape.py --target reddit_subreddit --url \"https://www.reddit.com/r/SUBREDDIT/\"\n\nExamples:\n\npython3 tools/scrape.py --target reddit_subreddit --url \"https://www.reddit.com/r/nba/\""
      },
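      {
        "title": "Calling the script from Python (illustrative)",
        "body": "A minimal wrapper sketch, assuming only what the docs above state: the script prints its result to stdout, writes a JSON error to stderr, and exits with code 1 on failure. The helper name run_scrape is hypothetical.\n\nimport json\nimport subprocess\n\ndef run_scrape(target, **args):\n    # Build the documented CLI call, e.g. --query or --url (hypothetical helper).\n    cmd = [\"python3\", \"tools/scrape.py\", \"--target\", target]\n    for key, value in args.items():\n        cmd += [f\"--{key}\", value]\n    proc = subprocess.run(cmd, capture_output=True, text=True)\n    if proc.returncode != 0:\n        # On failure the script writes a JSON error to stderr and exits with code 1.\n        raise RuntimeError(json.loads(proc.stderr))\n    return proc.stdout\n\n# Search results are JSON; organic is one of the documented sections.\nresults = json.loads(run_scrape(\"google_search\", query=\"best laptops 2025\"))\nprint(results.get(\"organic\", []))"
      },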
      {
        "title": "Summary",
        "body": "ActionTargetArgumentExample commandSearchgoogle_search--querypython3 tools/scrape.py --target google_search --query \"laptop\"Scrape pageuniversal--urlpython3 tools/scrape.py --target universal --url \"https://example.com\"Amazon productamazon--urlpython3 tools/scrape.py --target amazon --url \"https://www.amazon.com/dp/B09H74FXNW\"Amazon searchamazon_search--querypython3 tools/scrape.py --target amazon_search --query \"laptop\"YouTube subtitlesyoutube_subtitles--querypython3 tools/scrape.py --target youtube_subtitles --query \"dFu9aKJoqGg\"Reddit postreddit_post--urlpython3 tools/scrape.py --target reddit_post --url \"https://www.reddit.com/r/nba/comments/17jrqc5/...\"Reddit subredditreddit_subreddit--urlpython3 tools/scrape.py --target reddit_subreddit --url \"https://www.reddit.com/r/nba/\"\n\nOutput: Search → JSON (sections). Scrape URL → markdown. Amazon / Amazon search → JSON (results e.g. ads, product info, delivery_postcode). YouTube → transcript. Reddit → JSON (content)."
      }
    ],
    "body": "Decodo Scraper OpenClaw Skill\n\nUse this skill to search Google, scrape any URL, or fetch YouTube subtitles via the Decodo Web Scraping API. Search outputs a JSON object of result sections; Scrape URL outputs plain markdown; Amazon and Amazon search output parsed product-page or search results (JSON). Amazon search uses --query. YouTube subtitles outputs transcript/subtitles. Reddit post and Reddit subreddit output post/listing content (JSON).\n\nAuthentication: Set DECODO_AUTH_TOKEN (Basic auth token from Decodo Dashboard → Scraping APIs) in your environment or in a .env file in the repo root.\n\nErrors: On failure the script writes a JSON error to stderr and exits with code 1.\n\nTools\n1. Search Google\n\nUse this to find URLs, answers, or structured search results. The API returns a JSON object whose results key contains several sections (not all may be present for every query):\n\nSection\tDescription\norganic\tMain search results (titles, links, snippets).\nai_overviews\tAI-generated overviews or summaries when Google shows them.\npaid\tPaid/sponsored results (ads).\nrelated_questions\t“People also ask”–style questions and answers.\nrelated_searches\tSuggested related search queries.\ndiscussions_and_forums\tForum or discussion results (e.g. Reddit, Stack Exchange).\n\nThe script outputs only the inner results object (these sections); pagination info (page, last_visible_page, parse_status_code) is not included.\n\nCommand:\n\npython3 tools/scrape.py --target google_search --query \"your search query\"\n\n\nExamples:\n\npython3 tools/scrape.py --target google_search --query \"best laptops 2025\"\npython3 tools/scrape.py --target google_search --query \"python requests tutorial\"\n\n\nOptional: --geo us or --locale en for location/language.\n\n2. Scrape URL\n\nUse this to get the content of a specific web page. By default the API returns content as Markdown (cleaner for LLMs and lower token usage).\n\nCommand:\n\npython3 tools/scrape.py --target universal --url \"https://example.com\"\n\n\nExamples:\n\npython3 tools/scrape.py --target universal --url \"https://example.com\"\npython3 tools/scrape.py --target universal --url \"https://news.ycombinator.com/\"\n\n3. Amazon product page\n\nUse this to get parsed data from an Amazon product (or other Amazon) page. Pass the product page URL as --url. The script sends parse: true and outputs the inner results object (e.g. ads, product details, etc.).\n\nCommand:\n\npython3 tools/scrape.py --target amazon --url \"https://www.amazon.com/dp/PRODUCT_ID\"\n\n\nExamples:\n\npython3 tools/scrape.py --target amazon --url \"https://www.amazon.com/dp/B09H74FXNW\"\n\n4. Amazon search\n\nUse this to search Amazon and get parsed results (search results list, delivery_postcode, etc.). Pass the search query as --query.\n\nCommand:\n\npython3 tools/scrape.py --target amazon_search --query \"your search query\"\n\n\nExamples:\n\npython3 tools/scrape.py --target amazon_search --query \"laptop\"\n\n5. YouTube subtitles\n\nUse this to get subtitles/transcript for a YouTube video. Pass the video ID (e.g. from youtube.com/watch?v=VIDEO_ID) as --query.\n\nCommand:\n\npython3 tools/scrape.py --target youtube_subtitles --query \"VIDEO_ID\"\n\n\nExamples:\n\npython3 tools/scrape.py --target youtube_subtitles --query \"dFu9aKJoqGg\"\n\n6. Reddit post\n\nUse this to get the content of a Reddit post (thread). 
Pass the full post URL as --url.\n\nCommand:\n\npython3 tools/scrape.py --target reddit_post --url \"https://www.reddit.com/r/SUBREDDIT/comments/ID/...\"\n\n\nExamples:\n\npython3 tools/scrape.py --target reddit_post --url \"https://www.reddit.com/r/nba/comments/17jrqc5/serious_next_day_thread_postgame_discussion/\"\n\n7. Reddit subreddit\n\nUse this to get the listing (posts) of a Reddit subreddit. Pass the subreddit URL as --url.\n\nCommand:\n\npython3 tools/scrape.py --target reddit_subreddit --url \"https://www.reddit.com/r/SUBREDDIT/\"\n\n\nExamples:\n\npython3 tools/scrape.py --target reddit_subreddit --url \"https://www.reddit.com/r/nba/\"\n\nSummary\nAction\tTarget\tArgument\tExample command\nSearch\tgoogle_search\t--query\tpython3 tools/scrape.py --target google_search --query \"laptop\"\nScrape page\tuniversal\t--url\tpython3 tools/scrape.py --target universal --url \"https://example.com\"\nAmazon product\tamazon\t--url\tpython3 tools/scrape.py --target amazon --url \"https://www.amazon.com/dp/B09H74FXNW\"\nAmazon search\tamazon_search\t--query\tpython3 tools/scrape.py --target amazon_search --query \"laptop\"\nYouTube subtitles\tyoutube_subtitles\t--query\tpython3 tools/scrape.py --target youtube_subtitles --query \"dFu9aKJoqGg\"\nReddit post\treddit_post\t--url\tpython3 tools/scrape.py --target reddit_post --url \"https://www.reddit.com/r/nba/comments/17jrqc5/...\"\nReddit subreddit\treddit_subreddit\t--url\tpython3 tools/scrape.py --target reddit_subreddit --url \"https://www.reddit.com/r/nba/\"\n\nOutput: Search → JSON (sections). Scrape URL → markdown. Amazon / Amazon search → JSON (results e.g. ads, product info, delivery_postcode). YouTube → transcript. Reddit → JSON (content)."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/DonatasDecodo/decodo-scraper-skill",
    "publisherUrl": "https://clawhub.ai/DonatasDecodo/decodo-scraper-skill",
    "owner": "DonatasDecodo",
    "version": "1.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/decodo-scraper-skill",
    "downloadUrl": "https://openagent3.xyz/downloads/decodo-scraper-skill",
    "agentUrl": "https://openagent3.xyz/skills/decodo-scraper-skill/agent",
    "manifestUrl": "https://openagent3.xyz/skills/decodo-scraper-skill/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/decodo-scraper-skill/agent.md"
  }
}