{
  "schemaVersion": "1.0",
  "item": {
    "slug": "ads",
    "name": "Ads",
    "source": "tencent",
    "type": "skill",
    "category": "效率提升",
    "sourceUrl": "https://clawhub.ai/ivangdavila/ads",
    "canonicalUrl": "https://clawhub.ai/ivangdavila/ads",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/ads",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ads",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ads",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ads",
        "contentDisposition": "attachment; filename=\"ads-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/ads"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/ads",
    "agentPageUrl": "https://openagent3.xyz/skills/ads/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ads/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ads/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Budget Mistakes",
        "body": "Starting with daily budgets too low to exit learning phase — platforms need 50+ conversions/week per ad set to optimize properly\nSpreading budget across too many campaigns early — concentrate spend to gather statistically significant data faster\nKilling ads before statistical significance — minimum 100 clicks or 1000 impressions before judging creative performance\nNo contingency for scaling — reserve 20-30% of budget for doubling down on winners mid-month\nTreating ad spend as expense, not investment — track payback period, not just immediate ROAS"
      },
      {
        "title": "Metric Traps",
        "body": "Optimizing for CTR instead of conversion — high CTR with low conversion = curiosity clicks that waste budget\nTrusting platform-reported conversions — attribution windows vary (7-day click, 1-day view), always cross-reference with actual revenue\nIgnoring frequency — above 3-4 frequency per week, performance degrades and audience burns out\nCPA tunnel vision — a $50 CPA is better than $30 CPA if LTV is 3x higher for the $50 cohort\nVanity reach metrics — 1M impressions mean nothing if 0 target customers saw the ad"
      },
      {
        "title": "Creative Rules",
        "body": "One variable per test — changing image AND copy simultaneously teaches nothing about what works\nWinning ads fatigue in 2-4 weeks — have next creative batch ready before performance drops\nStatic images often outperform video on cost-per-conversion — test both, don't assume video is better\nHeadlines matter more than body copy — 80% of viewers read only the headline\nUser-generated content style outperforms polished brand creative in most direct response contexts"
      },
      {
        "title": "Audience Strategy",
        "body": "Broad targeting often wins at scale — platform algorithms find converters better than manual interest stacking\nLookalike audiences need minimum 1000 source users — smaller seeds create unstable lookalikes\nRetargeting pools need 7-14 day recency caps — beyond that, intent has faded\nExclude converters from prospecting campaigns — paying to show ads to existing customers wastes budget\nTest 1% vs 3% vs 5% lookalikes — tighter isn't always better, depends on market size"
      },
      {
        "title": "Platform-Specific Patterns",
        "body": "Meta: Learning phase resets with significant edits — avoid editing during first 50 conversions\nGoogle: Search intent beats display reach for direct response — display is for awareness, search is for capture\nTikTok: First 3 seconds determine everything — hook must be instant, no slow brand intros\nLinkedIn: CPMs are 5-10x higher — only viable for high-LTV B2B where one customer justifies $200+ CPA\nYouTube: Skippable ads teach you what hooks work — if they don't skip, your hook is strong"
      },
      {
        "title": "Scaling Pitfalls",
        "body": "Increasing budget more than 20-30% per day destabilizes campaigns — gradual scaling preserves algorithm learning\nDuplicating winning ad sets fragments the audience and causes self-competition\nScaling spend without scaling creative — same ads to larger audience = faster fatigue\nIgnoring incrementality — some conversions would have happened organically, true ROAS is lower than reported\nGeographic expansion without localization — same ad in new market often fails"
      },
      {
        "title": "Landing Page Impact",
        "body": "Ads are only half the equation — a 2x better landing page beats 2x more ad spend\nMessage match: ad promise must appear above the fold on landing page — disconnect kills conversion\nPage load time over 3 seconds loses 50%+ of paid clicks — optimize speed before scaling spend\nOne landing page per audience segment — generic pages convert worse than specific ones\nTrack micro-conversions (scroll depth, time on page) when sample size is too small for macro-conversions"
      },
      {
        "title": "Attribution Reality",
        "body": "Last-click attribution undervalues awareness campaigns — multi-touch attribution or holdout tests reveal true impact\niOS 14.5+ broke tracking for ~40% of users — model conversions, don't rely on pixel data alone\nOffline conversions (calls, in-store) need manual import or integration — otherwise CPA looks inflated\nView-through conversions are real but overvalued by platforms — weight click-through higher\n7-day attribution windows miss longer B2B sales cycles — extend windows or use CRM-based attribution"
      },
      {
        "title": "Testing Framework",
        "body": "Always run one control ad — without baseline, you don't know if new creative is better or platform just performed differently\nMinimum 2 weeks per test — weekday/weekend patterns affect results\nDocument every test with hypothesis, result, and learning — institutional memory prevents repeat mistakes\nTest audiences before creatives — wrong audience can't be saved by good creative\nNegative results are valuable — knowing what doesn't work prevents future waste"
      }
    ],
    "body": "Budget Mistakes\nStarting with daily budgets too low to exit learning phase — platforms need 50+ conversions/week per ad set to optimize properly\nSpreading budget across too many campaigns early — concentrate spend to gather statistically significant data faster\nKilling ads before statistical significance — minimum 100 clicks or 1000 impressions before judging creative performance\nNo contingency for scaling — reserve 20-30% of budget for doubling down on winners mid-month\nTreating ad spend as expense, not investment — track payback period, not just immediate ROAS\nMetric Traps\nOptimizing for CTR instead of conversion — high CTR with low conversion = curiosity clicks that waste budget\nTrusting platform-reported conversions — attribution windows vary (7-day click, 1-day view), always cross-reference with actual revenue\nIgnoring frequency — above 3-4 frequency per week, performance degrades and audience burns out\nCPA tunnel vision — a $50 CPA is better than $30 CPA if LTV is 3x higher for the $50 cohort\nVanity reach metrics — 1M impressions mean nothing if 0 target customers saw the ad\nCreative Rules\nOne variable per test — changing image AND copy simultaneously teaches nothing about what works\nWinning ads fatigue in 2-4 weeks — have next creative batch ready before performance drops\nStatic images often outperform video on cost-per-conversion — test both, don't assume video is better\nHeadlines matter more than body copy — 80% of viewers read only the headline\nUser-generated content style outperforms polished brand creative in most direct response contexts\nAudience Strategy\nBroad targeting often wins at scale — platform algorithms find converters better than manual interest stacking\nLookalike audiences need minimum 1000 source users — smaller seeds create unstable lookalikes\nRetargeting pools need 7-14 day recency caps — beyond that, intent has faded\nExclude converters from prospecting campaigns — paying to show ads to existing customers wastes budget\nTest 1% vs 3% vs 5% lookalikes — tighter isn't always better, depends on market size\nPlatform-Specific Patterns\nMeta: Learning phase resets with significant edits — avoid editing during first 50 conversions\nGoogle: Search intent beats display reach for direct response — display is for awareness, search is for capture\nTikTok: First 3 seconds determine everything — hook must be instant, no slow brand intros\nLinkedIn: CPMs are 5-10x higher — only viable for high-LTV B2B where one customer justifies $200+ CPA\nYouTube: Skippable ads teach you what hooks work — if they don't skip, your hook is strong\nScaling Pitfalls\nIncreasing budget more than 20-30% per day destabilizes campaigns — gradual scaling preserves algorithm learning\nDuplicating winning ad sets fragments the audience and causes self-competition\nScaling spend without scaling creative — same ads to larger audience = faster fatigue\nIgnoring incrementality — some conversions would have happened organically, true ROAS is lower than reported\nGeographic expansion without localization — same ad in new market often fails\nLanding Page Impact\nAds are only half the equation — a 2x better landing page beats 2x more ad spend\nMessage match: ad promise must appear above the fold on landing page — disconnect kills conversion\nPage load time over 3 seconds loses 50%+ of paid clicks — optimize speed before scaling spend\nOne landing page per audience segment — generic pages convert worse than specific ones\nTrack micro-conversions (scroll depth, time on page) when sample size is too small for macro-conversions\nAttribution Reality\nLast-click attribution undervalues awareness campaigns — multi-touch attribution or holdout tests reveal true impact\niOS 14.5+ broke tracking for ~40% of users — model conversions, don't rely on pixel data alone\nOffline conversions (calls, in-store) need manual import or integration — otherwise CPA looks inflated\nView-through conversions are real but overvalued by platforms — weight click-through higher\n7-day attribution windows miss longer B2B sales cycles — extend windows or use CRM-based attribution\nTesting Framework\nAlways run one control ad — without baseline, you don't know if new creative is better or platform just performed differently\nMinimum 2 weeks per test — weekday/weekend patterns affect results\nDocument every test with hypothesis, result, and learning — institutional memory prevents repeat mistakes\nTest audiences before creatives — wrong audience can't be saved by good creative\nNegative results are valuable — knowing what doesn't work prevents future waste"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/ivangdavila/ads",
    "publisherUrl": "https://clawhub.ai/ivangdavila/ads",
    "owner": "ivangdavila",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/ads",
    "downloadUrl": "https://openagent3.xyz/downloads/ads",
    "agentUrl": "https://openagent3.xyz/skills/ads/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ads/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ads/agent.md"
  }
}