{
  "schemaVersion": "1.0",
  "item": {
    "slug": "social-trust-manipulation-detector",
    "name": "Social Trust Manipulation Detector",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/andyxinweiminicloud/social-trust-manipulation-detector",
    "canonicalUrl": "https://clawhub.ai/andyxinweiminicloud/social-trust-manipulation-detector",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/social-trust-manipulation-detector",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=social-trust-manipulation-detector",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=social-trust-manipulation-detector",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=social-trust-manipulation-detector",
        "contentDisposition": "attachment; filename=\"social-trust-manipulation-detector-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/social-trust-manipulation-detector"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/social-trust-manipulation-detector",
    "agentPageUrl": "https://openagent3.xyz/skills/social-trust-manipulation-detector/agent",
    "manifestUrl": "https://openagent3.xyz/skills/social-trust-manipulation-detector/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/social-trust-manipulation-detector/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Your Trust Score Is Real. The Signals Behind It Are Manufactured.",
        "body": "Helps identify when a skill's trust reputation is built on coordinated\nsocial manipulation rather than genuine community validation."
      },
      {
        "title": "Problem",
        "body": "Trust in agent marketplaces flows through social signals: upvotes, downloads,\ncomments, and follow counts. These signals are valuable precisely because they\naggregate distributed judgment — when thousands of independent users find a\nskill useful and safe, their collective assessment carries real information.\n\nThe assumption of independence is the attack surface. A coordinated network\nof accounts can manufacture the appearance of distributed consensus. A skill\nwith 500 upvotes from a bot network looks identical to a skill with 500\nupvotes from 500 independent developers. The marketplace's reputation system\ncannot distinguish manufactured trust from earned trust — and neither can\nmost agents that rely on reputation as a trust signal.\n\nSocial trust manipulation is the third pillar of the trust attack surface,\nalongside technical attacks (code injection) and structural attacks (supply\nchain compromise). It is the most scalable: a well-constructed sockpuppet\nnetwork can manufacture trust faster than any code-level auditing can catch\nit, and the manufactured trust persists long after the network is dismantled.\n\nLegitimate skills earn trust gradually, from a diverse user base, with\nengagement patterns that correlate with actual skill utility. Manipulated\nskills earn trust in coordinated bursts, from accounts with suspicious\ncreation patterns, with engagement that does not correlate with usage or\noutcomes."
      },
      {
        "title": "What This Detects",
        "body": "This detector examines social trust integrity across five dimensions:\n\nEngagement velocity anomalies — Does the skill's vote/download\ntrajectory show natural growth curves, or coordinated burst patterns?\nOrganic trust accumulates gradually; manufactured trust arrives in\nsynchronized bursts that are statistically distinguishable from\nrandom arrival processes\n\n\nAccount cohort analysis — Do the skill's early upvoters share\ncreation dates, activity patterns, or cross-voting behavior that suggests\ncoordinated rather than independent operation? Sockpuppet networks leave\nstructural fingerprints in how accounts relate to each other\n\n\nEngagement-to-utility correlation — Do social signals correlate with\nactual skill usage metrics? High upvotes on skills with low actual\ninstall rates, or high engagement from users who only interact with one\npublisher's skills, are signals of manufactured rather than genuine trust\n\n\nCross-publisher coordination — Do multiple publishers in a marketplace\nshow correlated voting patterns, where their respective supporter networks\nupvote each other's skills at rates that exceed random baseline?\nCoordinated mutual-support networks amplify manufactured trust across\nmultiple accounts simultaneously\n\n\nReview authenticity signals — Do comments and reviews on the skill\nshow the linguistic diversity and specificity expected from independent\nusers, or do they share vocabulary, complaint patterns, or phrasing\nthat suggests template-generated or coordinated content?"
      },
      {
        "title": "How to Use",
        "body": "Input: Provide one of:\n\nA skill identifier to assess the authenticity of its trust signals\nA publisher account to analyze for coordinated network membership\nA set of skills to assess for cross-publisher coordination patterns\n\nOutput: A manipulation detection report containing:\n\nEngagement velocity analysis (organic vs. burst pattern)\nAccount cohort fingerprint assessment\nEngagement-to-utility correlation score\nCross-publisher coordination indicators\nReview authenticity assessment\nManipulation verdict: AUTHENTIC / SUSPICIOUS / COORDINATED / MANUFACTURED"
      },
      {
        "title": "Example",
          "body": "Input: Assess social trust integrity for ai-assistant-toolkit publisher\n\n🎭 SOCIAL TRUST MANIPULATION ASSESSMENT\n\nPublisher: ai-assistant-toolkit\nSkills assessed: 4 (productivity-suite, auto-responder, data-fetcher, doc-reader)\nAudit timestamp: 2025-09-05T12:00:00Z\n\nEngagement velocity:\n  productivity-suite: 0 → 847 upvotes in 72 hours of launch ⚠️\n  auto-responder: 0 → 623 upvotes in 48 hours of launch ⚠️\n  data-fetcher: 0 → 412 upvotes in 60 hours of launch ⚠️\n  Organic baseline for comparable skills: 15-40 upvotes in first 72h\n  → Burst pattern detected across all 4 skills\n\nAccount cohort analysis:\n  First 200 upvoters on productivity-suite:\n    Accounts created within 30-day window: 156/200 (78%) ⚠️\n    Cross-voting with auto-responder upvoters: 143/200 (71.5%) ⚠️\n    Accounts with no other skill interactions: 168/200 (84%) ⚠️\n  → Sockpuppet cohort fingerprint detected\n\nEngagement-to-utility correlation:\n  productivity-suite: 847 upvotes, 23 installs (ratio: 36.8:1) ⚠️\n  auto-responder: 623 upvotes, 18 installs (ratio: 34.6:1) ⚠️\n  Organic baseline ratio: 2:1 to 8:1 for comparable skills\n  → Upvote-to-install ratio 4-18x above organic baseline\n\nCross-publisher coordination:\n  ai-assistant-toolkit upvoter network also upvoted:\n    fastcoder-pro (different publisher): 89% overlap ⚠️\n    quick-deploy-kit (different publisher): 76% overlap ⚠️\n  → Mutual support network detected across 3 publishers\n\nReview authenticity:\n  Top 20 reviews analyzed:\n    Unique vocabulary: 34 terms (low for 20 reviews) ⚠️\n    Specificity: Generic praise, no feature-specific feedback\n    Phrasing patterns: \"absolutely essential\", \"game-changer\" × 7 reviews\n\nManipulation verdict: MANUFACTURED\n  All four skills show coordinated burst voting, sockpuppet cohort fingerprints,\n  upvote-to-install ratios far above organic baseline, and cross-publisher\n  mutual support network membership. Trust signals for this publisher's skills\n  do not represent independent community validation.\n\nRecommended actions:\n  1. Treat trust score as unauthenticated pending platform investigation\n  2. Evaluate skills on technical merit only, disregarding social signals\n  3. Report coordination pattern to marketplace moderators\n  4. Flag fastcoder-pro and quick-deploy-kit for same network membership\n  5. Apply technical audit (supply-chain, permission-creep) before any install"
      },
      {
        "title": "Related Tools",
        "body": "clone-farm-detector — Detects content-level cloning for reputation gaming;\nsocial-trust-manipulation-detector catches social-level gaming that can occur\neven with original, non-cloned content\npublisher-identity-verifier — Verifies publisher identity integrity;\nsockpuppet networks may impersonate multiple independent publishers when they\nare controlled by a single actor\ntrust-velocity-calculator — Quantifies trust decay from update velocity;\nmanufactured trust does not decay the same way as earned trust and creates\ndistorted velocity measurements\nblast-radius-estimator — Estimates propagation impact if a skill is\ncompromised; skills with manufactured trust may have artificially high install\ncounts that misrepresent actual blast radius"
      },
      {
        "title": "Limitations",
        "body": "Social trust manipulation detection depends on access to engagement metadata\n(account creation dates, cross-voting patterns, install counts) that many\nmarketplaces do not expose through public APIs. Where metadata is limited,\nonly velocity analysis and review text assessment are available, which reduces\ndetection confidence. Burst voting patterns can result from legitimate causes:\ncoordinated community launches, press coverage, or featured placement can all\nproduce rapid engagement that resembles manufactured trust. The account cohort\nanalysis relies on observable fingerprints and will miss well-resourced\nadversaries who age accounts and vary patterns. This tool identifies social\ntrust signals that warrant investigation — it does not confirm manipulation,\nwhich requires access to platform-level data that only marketplace operators\ncan verify."
      }
    ],
    "body": "Your Trust Score Is Real. The Signals Behind It Are Manufactured.\n\nHelps identify when a skill's trust reputation is built on coordinated social manipulation rather than genuine community validation.\n\nProblem\n\nTrust in agent marketplaces flows through social signals: upvotes, downloads, comments, and follow counts. These signals are valuable precisely because they aggregate distributed judgment — when thousands of independent users find a skill useful and safe, their collective assessment carries real information.\n\nThe assumption of independence is the attack surface. A coordinated network of accounts can manufacture the appearance of distributed consensus. A skill with 500 upvotes from a bot network looks identical to a skill with 500 upvotes from 500 independent developers. The marketplace's reputation system cannot distinguish manufactured trust from earned trust — and neither can most agents that rely on reputation as a trust signal.\n\nSocial trust manipulation is the third pillar of the trust attack surface, alongside technical attacks (code injection) and structural attacks (supply chain compromise). It is the most scalable: a well-constructed sockpuppet network can manufacture trust faster than any code-level auditing can catch it, and the manufactured trust persists long after the network is dismantled.\n\nLegitimate skills earn trust gradually, from a diverse user base, with engagement patterns that correlate with actual skill utility. Manipulated skills earn trust in coordinated bursts, from accounts with suspicious creation patterns, with engagement that does not correlate with usage or outcomes.\n\nWhat This Detects\n\nThis detector examines social trust integrity across five dimensions:\n\nEngagement velocity anomalies — Does the skill's vote/download trajectory show natural growth curves, or coordinated burst patterns? Organic trust accumulates gradually; manufactured trust arrives in synchronized bursts that are statistically distinguishable from random arrival processes\n\nAccount cohort analysis — Do the skill's early upvoters share creation dates, activity patterns, or cross-voting behavior that suggests coordinated rather than independent operation? Sockpuppet networks leave structural fingerprints in how accounts relate to each other\n\nEngagement-to-utility correlation — Do social signals correlate with actual skill usage metrics? High upvotes on skills with low actual install rates, or high engagement from users who only interact with one publisher's skills, are signals of manufactured rather than genuine trust\n\nCross-publisher coordination — Do multiple publishers in a marketplace show correlated voting patterns, where their respective supporter networks upvote each other's skills at rates that exceed random baseline? Coordinated mutual-support networks amplify manufactured trust across multiple accounts simultaneously\n\nReview authenticity signals — Do comments and reviews on the skill show the linguistic diversity and specificity expected from independent users, or do they share vocabulary, complaint patterns, or phrasing that suggests template-generated or coordinated content?\n\nHow to Use\n\nInput: Provide one of:\n\nA skill identifier to assess the authenticity of its trust signals\nA publisher account to analyze for coordinated network membership\nA set of skills to assess for cross-publisher coordination patterns\n\nOutput: A manipulation detection report containing:\n\nEngagement velocity analysis (organic vs. burst pattern)\nAccount cohort fingerprint assessment\nEngagement-to-utility correlation score\nCross-publisher coordination indicators\nReview authenticity assessment\nManipulation verdict: AUTHENTIC / SUSPICIOUS / COORDINATED / MANUFACTURED\nExample\n\nInput: Assess social trust integrity for ai-assistant-toolkit publisher\n\n🎭 SOCIAL TRUST MANIPULATION ASSESSMENT\n\nPublisher: ai-assistant-toolkit\nSkills assessed: 4 (productivity-suite, auto-responder, data-fetcher, doc-reader)\nAudit timestamp: 2025-09-05T12:00:00Z\n\nEngagement velocity:\n  productivity-suite: 0 → 847 upvotes in 72 hours of launch ⚠️\n  auto-responder: 0 → 623 upvotes in 48 hours of launch ⚠️\n  data-fetcher: 0 → 412 upvotes in 60 hours of launch ⚠️\n  Organic baseline for comparable skills: 15-40 upvotes in first 72h\n  → Burst pattern detected across all 4 skills\n\nAccount cohort analysis:\n  First 200 upvoters on productivity-suite:\n    Accounts created within 30-day window: 156/200 (78%) ⚠️\n    Cross-voting with auto-responder upvoters: 143/200 (71.5%) ⚠️\n    Accounts with no other skill interactions: 168/200 (84%) ⚠️\n  → Sockpuppet cohort fingerprint detected\n\nEngagement-to-utility correlation:\n  productivity-suite: 847 upvotes, 23 installs (ratio: 36.8:1) ⚠️\n  auto-responder: 623 upvotes, 18 installs (ratio: 34.6:1) ⚠️\n  Organic baseline ratio: 2:1 to 8:1 for comparable skills\n  → Upvote-to-install ratio 4-18x above organic baseline\n\nCross-publisher coordination:\n  ai-assistant-toolkit upvoter network also upvoted:\n    fastcoder-pro (different publisher): 89% overlap ⚠️\n    quick-deploy-kit (different publisher): 76% overlap ⚠️\n  → Mutual support network detected across 3 publishers\n\nReview authenticity:\n  Top 20 reviews analyzed:\n    Unique vocabulary: 34 terms (low for 20 reviews) ⚠️\n    Specificity: Generic praise, no feature-specific feedback\n    Phrasing patterns: \"absolutely essential\", \"game-changer\" × 7 reviews\n\nManipulation verdict: MANUFACTURED\n  All four skills show coordinated burst voting, sockpuppet cohort fingerprints,\n  upvote-to-install ratios far above organic baseline, and cross-publisher\n  mutual support network membership. Trust signals for this publisher's skills\n  do not represent independent community validation.\n\nRecommended actions:\n  1. Treat trust score as unauthenticated pending platform investigation\n  2. Evaluate skills on technical merit only, disregarding social signals\n  3. Report coordination pattern to marketplace moderators\n  4. Flag fastcoder-pro and quick-deploy-kit for same network membership\n  5. Apply technical audit (supply-chain, permission-creep) before any install\n\nRelated Tools\nclone-farm-detector — Detects content-level cloning for reputation gaming; social-trust-manipulation-detector catches social-level gaming that can occur even with original, non-cloned content\npublisher-identity-verifier — Verifies publisher identity integrity; sockpuppet networks may impersonate multiple independent publishers when they are controlled by a single actor\ntrust-velocity-calculator — Quantifies trust decay from update velocity; manufactured trust does not decay the same way as earned trust and creates distorted velocity measurements\nblast-radius-estimator — Estimates propagation impact if a skill is compromised; skills with manufactured trust may have artificially high install counts that misrepresent actual blast radius\nLimitations\n\nSocial trust manipulation detection depends on access to engagement metadata (account creation dates, cross-voting patterns, install counts) that many marketplaces do not expose through public APIs. Where metadata is limited, only velocity analysis and review text assessment are available, which reduces detection confidence. Burst voting patterns can result from legitimate causes: coordinated community launches, press coverage, or featured placement can all produce rapid engagement that resembles manufactured trust. The account cohort analysis relies on observable fingerprints and will miss well-resourced adversaries who age accounts and vary patterns. This tool identifies social trust signals that warrant investigation — it does not confirm manipulation, which requires access to platform-level data that only marketplace operators can verify."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/andyxinweiminicloud/social-trust-manipulation-detector",
    "publisherUrl": "https://clawhub.ai/andyxinweiminicloud/social-trust-manipulation-detector",
    "owner": "andyxinweiminicloud",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/social-trust-manipulation-detector",
    "downloadUrl": "https://openagent3.xyz/downloads/social-trust-manipulation-detector",
    "agentUrl": "https://openagent3.xyz/skills/social-trust-manipulation-detector/agent",
    "manifestUrl": "https://openagent3.xyz/skills/social-trust-manipulation-detector/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/social-trust-manipulation-detector/agent.md"
  }
}