{
  "schemaVersion": "1.0",
  "item": {
    "slug": "skill-reviewer",
    "name": "Skill Reviewer",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/gitgoodordietrying/skill-reviewer",
    "canonicalUrl": "https://clawhub.ai/gitgoodordietrying/skill-reviewer",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/skill-reviewer",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=skill-reviewer",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=skill-reviewer",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=skill-reviewer",
        "contentDisposition": "attachment; filename=\"skill-reviewer-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/skill-reviewer"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/skill-reviewer",
    "agentPageUrl": "https://openagent3.xyz/skills/skill-reviewer/agent",
    "manifestUrl": "https://openagent3.xyz/skills/skill-reviewer/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/skill-reviewer/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Skill Reviewer",
        "body": "Audit agent skills (SKILL.md files) for quality, correctness, and completeness. Provides a structured review framework with scoring rubric, defect checklists, and improvement recommendations."
      },
      {
        "title": "When to Use",
        "body": "Reviewing a skill before publishing to the registry\nEvaluating a skill you downloaded from the registry\nAuditing your own skills for quality improvements\nComparing skills in the same category\nDeciding whether a skill is worth installing"
      },
      {
        "title": "Step 1: Structural Check",
        "body": "Verify the skill has the required structure. Read the file and check each item:\n\nSTRUCTURAL CHECKLIST:\n[ ] Valid YAML frontmatter (opens and closes with ---)\n[ ] `name` field present and is a valid slug (lowercase, hyphenated)\n[ ] `description` field present and non-empty\n[ ] `metadata` field present with valid JSON\n[ ] `metadata.clawdbot.emoji` is a single emoji\n[ ] `metadata.clawdbot.requires.anyBins` lists real CLI tools\n[ ] Title heading (# Title) immediately after frontmatter\n[ ] Summary paragraph after title\n[ ] \"When to Use\" section present\n[ ] At least 3 main content sections\n[ ] \"Tips\" section present at the end"
      },
      {
        "title": "Step 2: Frontmatter Quality",
        "body": "Description field audit\n\nThe description is the most impactful field. Evaluate it against these criteria:\n\nDESCRIPTION SCORING:\n\n[2] Starts with what the skill does (active verb)\n    GOOD: \"Write Makefiles for any project type.\"\n    BAD:  \"This skill covers Makefiles.\"\n    BAD:  \"A comprehensive guide to Make.\"\n\n[2] Includes trigger phrases (\"Use when...\")\n    GOOD: \"Use when setting up build automation, defining multi-target builds\"\n    BAD:  No trigger phrases at all\n\n[2] Specific scope (mentions concrete tools, languages, or operations)\n    GOOD: \"SQLite/PostgreSQL/MySQL — schema design, queries, CTEs, window functions\"\n    BAD:  \"Database stuff\"\n\n[1] Reasonable length (50-200 characters)\n    TOO SHORT: \"Make things\" (no search surface)\n    TOO LONG:  300+ characters (gets truncated)\n\n[1] Contains searchable keywords naturally\n    GOOD: \"cron jobs, systemd timers, scheduling\"\n    BAD:  Keywords stuffed unnaturally\n\nScore: __/8\n\nMetadata audit\n\nMETADATA SCORING:\n\n[1] emoji is relevant to the skill topic\n[1] requires.anyBins lists tools the skill actually uses (not generic tools like \"bash\")\n[1] os array is accurate (don't claim win32 if commands are Linux-only)\n[1] JSON is valid (test with a JSON parser)\n\nScore: __/4"
      },
      {
        "title": "Step 3: Content Quality",
        "body": "Example density\n\nCount code blocks and total lines:\n\nEXAMPLE DENSITY:\n\nLines:       ___\nCode blocks: ___\nRatio:       1 code block per ___ lines\n\nTARGET: 1 code block per 8-15 lines\n< 8  lines per block: possibly over-fragmented\n> 20 lines per block: needs more examples\n\nExample quality\n\nFor each code block, check:\n\nEXAMPLE QUALITY CHECKLIST:\n\n[ ] Language tag specified (```bash, ```python, etc.)\n[ ] Command is syntactically correct\n[ ] Output shown in comments where helpful\n[ ] Uses realistic values (not foo/bar/baz)\n[ ] No placeholder values left (TODO, FIXME, xxx)\n[ ] Self-contained (doesn't depend on undefined variables)\n    OR setup is shown/referenced\n[ ] Covers the common case (not just edge cases)\n\nScore each example 0-3:\n\n0: Broken or misleading\n1: Works but minimal (no output, no context)\n2: Good (correct, has output or explanation)\n3: Excellent (copy-pasteable, realistic, covers edge case)\n\nSection organization\n\nORGANIZATION SCORING:\n\n[2] Organized by task/scenario (not by abstract concept)\n    GOOD: \"## Encode and Decode\" → \"## Inspect Characters\" → \"## Convert Formats\"\n    BAD:  \"## Theory\" → \"## Types\" → \"## Advanced\"\n\n[2] Most common operations come first\n    GOOD: Basic usage → Variations → Advanced → Edge cases\n    BAD:  Configuration → Theory → Finally the basic usage\n\n[1] Sections are self-contained (can be used independently)\n\n[1] Consistent depth (not mixing h2 with h4 randomly)\n\nScore: __/6\n\nCross-platform accuracy\n\nPLATFORM CHECKLIST:\n\n[ ] macOS differences noted where relevant\n    (sed -i '' vs sed -i, brew vs apt, BSD vs GNU flags)\n[ ] Linux distro variations noted (apt vs yum vs pacman)\n[ ] Windows compatibility addressed if os includes \"win32\"\n[ ] Tool version assumptions stated (Docker v2 syntax, Python 3.x)"
      },
      {
        "title": "Step 4: Actionability Assessment",
        "body": "The core question: can an agent follow these instructions to produce correct results?\n\nACTIONABILITY SCORING:\n\n[3] Instructions are imperative (\"Run X\", \"Create Y\")\n    NOT: \"You might consider...\" or \"It's recommended to...\"\n\n[3] Steps are ordered logically (prerequisites before actions)\n\n[2] Error cases addressed (what to do when something fails)\n\n[2] Output/result described (how to verify it worked)\n\nScore: __/10"
      },
      {
        "title": "Step 5: Tips Section Quality",
        "body": "TIPS SCORING:\n\n[2] 5-10 tips present\n\n[2] Tips are non-obvious (not \"read the documentation\")\n    GOOD: \"The number one Makefile bug: spaces instead of tabs\"\n    BAD:  \"Make sure to test your code\"\n\n[2] Tips are specific and actionable\n    GOOD: \"Use flock to prevent overlapping cron runs\"\n    BAD:  \"Be careful with concurrent execution\"\n\n[1] No tips contradict the main content\n\n[1] Tips cover gotchas/footguns specific to this topic\n\nScore: __/8"
      },
      {
        "title": "Scoring Summary",
        "body": "SKILL REVIEW SCORECARD\n═══════════════════════════════════════\nSkill: [name]\nReviewer: [agent/human]\nDate: [date]\n\nCategory              Score    Max\n─────────────────────────────────────\nStructure             __       11\nDescription           __        8\nMetadata              __        4\nExample density       __        3*\nExample quality       __        3*\nOrganization          __        6\nActionability         __       10\nTips                  __        8\n─────────────────────────────────────\nTOTAL                 __       53+\n\n* Example density and quality are per-sample,\n  not summed. Use the average across all examples.\n\nRATING:\n  45+  Excellent — publish-ready\n  35-44 Good — minor improvements needed\n  25-34 Fair — significant gaps to address\n  < 25  Poor — needs major rework\n\nVERDICT: [PUBLISH / REVISE / REWORK]"
      },
      {
        "title": "Critical (blocks publishing)",
        "body": "DEFECT: Invalid frontmatter\nDETECT: YAML parse error, missing required fields\nFIX:    Validate YAML, ensure name/description/metadata all present\n\nDEFECT: Broken code examples\nDETECT: Syntax errors, undefined variables, wrong flags\nFIX:    Test every command in a clean environment\n\nDEFECT: Wrong tool requirements\nDETECT: metadata.requires lists tools not used in content, or omits tools that are used\nFIX:    Grep content for command names, update requires to match\n\nDEFECT: Misleading description\nDETECT: Description promises coverage the content doesn't deliver\nFIX:    Align description with actual content, or add missing content"
      },
      {
        "title": "Major (should fix before publishing)",
        "body": "DEFECT: No \"When to Use\" section\nIMPACT: Agent doesn't know when to activate the skill\nFIX:    Add 4-8 bullet points describing trigger scenarios\n\nDEFECT: Text walls without examples\nDETECT: Any section > 10 lines with no code block\nFIX:    Add concrete examples for every concept described\n\nDEFECT: Examples missing language tags\nDETECT: ``` without language identifier\nFIX:    Add bash, python, javascript, yaml, etc. to every code fence\n\nDEFECT: No Tips section\nIMPACT: Missing the distilled expertise that makes a skill valuable\nFIX:    Add 5-10 non-obvious, actionable tips\n\nDEFECT: Abstract organization\nDETECT: Sections named \"Theory\", \"Overview\", \"Background\", \"Introduction\"\nFIX:    Reorganize by task/operation: what the user is trying to DO"
      },
      {
        "title": "Minor (nice to fix)",
        "body": "DEFECT: Placeholder values\nDETECT: foo, bar, baz, example.com, 1.2.3.4, TODO, FIXME\nFIX:    Replace with realistic values (myapp, api.example.com, 192.168.1.100)\n\nDEFECT: Inconsistent formatting\nDETECT: Mixed heading levels, inconsistent code block style\nFIX:    Standardize heading hierarchy and formatting\n\nDEFECT: Missing cross-references\nDETECT: Mentions tools/concepts covered by other skills without referencing them\nFIX:    Add \"See the X skill for more on Y\" notes\n\nDEFECT: Outdated commands\nDETECT: docker-compose (v1), python (not python3), npm -g without npx alternative\nFIX:    Update to current tool versions and syntax"
      },
      {
        "title": "Comparative Review",
        "body": "When comparing skills in the same category:\n\nCOMPARATIVE CRITERIA:\n\n1. Coverage breadth\n   Which skill covers more use cases?\n\n2. Example quality\n   Which has more runnable, realistic examples?\n\n3. Depth on common operations\n   Which handles the 80% case better?\n\n4. Edge case coverage\n   Which addresses more gotchas and failure modes?\n\n5. Cross-platform support\n   Which works across more environments?\n\n6. Freshness\n   Which uses current tool versions and syntax?\n\nWINNER: [skill A / skill B / tie]\nREASON: [1-2 sentence justification]"
      },
      {
        "title": "Quick Review Template",
        "body": "For a fast review when you don't need full scoring:\n\n## Quick Review: [skill-name]\n\n**Structure**: [OK / Issues: ...]\n**Description**: [Strong / Weak: reason]\n**Examples**: [X code blocks across Y lines — density OK/low/high]\n**Actionability**: [Agent can/cannot follow these instructions because...]\n**Top defect**: [The single most impactful thing to fix]\n**Verdict**: [PUBLISH / REVISE / REWORK]"
      },
      {
        "title": "Reviewing your own skill before publishing",
        "body": "# 1. Validate frontmatter\nhead -20 skills/my-skill/SKILL.md\n# Visually confirm YAML is valid\n\n# 2. Count code blocks\ngrep -c '```' skills/my-skill/SKILL.md\n# Divide total lines by this number for density\n\n# 3. Check for placeholders\ngrep -n -i 'todo\\|fixme\\|xxx\\|foo\\|bar\\|baz' skills/my-skill/SKILL.md\n\n# 4. Check for missing language tags\ngrep -n '^```$' skills/my-skill/SKILL.md\n# Every code fence should have a language tag — bare ``` is a defect\n\n# 5. Verify tool requirements match content\n# Extract requires from frontmatter, then grep for each tool in content\n\n# 6. Test commands (sample 3-5 from the skill)\n# Run them in a clean shell to verify they work\n\n# 7. Run the scorecard mentally or in a file\n# Target: 35+ for good, 45+ for excellent"
      },
      {
        "title": "Reviewing a registry skill after installing",
        "body": "# Install the skill\nnpx molthub@latest install skill-name\n\n# Read it\ncat skills/skill-name/SKILL.md\n\n# Run the quick review template\n# If score < 25, consider uninstalling and finding an alternative"
      },
      {
        "title": "Tips",
        "body": "The description field accounts for more real-world impact than all other fields combined. A perfect skill with a bad description will never be found via search.\nCount code blocks as your first quality signal. Skills with fewer than 8 code blocks are almost always too abstract to be useful.\nTest 3-5 commands from the skill in a clean environment. If more than one fails, the skill wasn't tested before publishing.\n\"Organized by task\" vs. \"organized by concept\" is the single biggest structural quality differentiator. Good skills answer \"how do I do X?\" — bad skills explain \"what is X?\"\nA skill with great tips but weak examples is better than one with thorough examples but no tips. Tips encode expertise that examples alone don't convey.\nCheck the requires.anyBins against what the skill actually uses. A common defect is listing bash (which everything has) instead of the actual tools like docker, curl, or jq.\nShort skills (< 150 lines) usually aren't worth publishing — they don't provide enough value over a quick web search. If your skill is short, it might be better as a section in a larger skill.\nThe best skills are ones you'd bookmark yourself. If you wouldn't use it, don't publish it."
      }
    ],
    "body": "Skill Reviewer\n\nAudit agent skills (SKILL.md files) for quality, correctness, and completeness. Provides a structured review framework with scoring rubric, defect checklists, and improvement recommendations.\n\nWhen to Use\nReviewing a skill before publishing to the registry\nEvaluating a skill you downloaded from the registry\nAuditing your own skills for quality improvements\nComparing skills in the same category\nDeciding whether a skill is worth installing\nReview Process\nStep 1: Structural Check\n\nVerify the skill has the required structure. Read the file and check each item:\n\nSTRUCTURAL CHECKLIST:\n[ ] Valid YAML frontmatter (opens and closes with ---)\n[ ] `name` field present and is a valid slug (lowercase, hyphenated)\n[ ] `description` field present and non-empty\n[ ] `metadata` field present with valid JSON\n[ ] `metadata.clawdbot.emoji` is a single emoji\n[ ] `metadata.clawdbot.requires.anyBins` lists real CLI tools\n[ ] Title heading (# Title) immediately after frontmatter\n[ ] Summary paragraph after title\n[ ] \"When to Use\" section present\n[ ] At least 3 main content sections\n[ ] \"Tips\" section present at the end\n\nStep 2: Frontmatter Quality\nDescription field audit\n\nThe description is the most impactful field. 
Evaluate it against these criteria:\n\nDESCRIPTION SCORING:\n\n[2] Starts with what the skill does (active verb)\n    GOOD: \"Write Makefiles for any project type.\"\n    BAD:  \"This skill covers Makefiles.\"\n    BAD:  \"A comprehensive guide to Make.\"\n\n[2] Includes trigger phrases (\"Use when...\")\n    GOOD: \"Use when setting up build automation, defining multi-target builds\"\n    BAD:  No trigger phrases at all\n\n[2] Specific scope (mentions concrete tools, languages, or operations)\n    GOOD: \"SQLite/PostgreSQL/MySQL — schema design, queries, CTEs, window functions\"\n    BAD:  \"Database stuff\"\n\n[1] Reasonable length (50-200 characters)\n    TOO SHORT: \"Make things\" (no search surface)\n    TOO LONG:  300+ characters (gets truncated)\n\n[1] Contains searchable keywords naturally\n    GOOD: \"cron jobs, systemd timers, scheduling\"\n    BAD:  Keywords stuffed unnaturally\n\nScore: __/8\n\nMetadata audit\nMETADATA SCORING:\n\n[1] emoji is relevant to the skill topic\n[1] requires.anyBins lists tools the skill actually uses (not generic tools like \"bash\")\n[1] os array is accurate (don't claim win32 if commands are Linux-only)\n[1] JSON is valid (test with a JSON parser)\n\nScore: __/4\n\nStep 3: Content Quality\nExample density\n\nCount code blocks and total lines:\n\nEXAMPLE DENSITY:\n\nLines:       ___\nCode blocks: ___\nRatio:       1 code block per ___ lines\n\nTARGET: 1 code block per 8-15 lines\n< 8  lines per block: possibly over-fragmented\n> 20 lines per block: needs more examples\n\nExample quality\n\nFor each code block, check:\n\nEXAMPLE QUALITY CHECKLIST:\n\n[ ] Language tag specified (```bash, ```python, etc.)\n[ ] Command is syntactically correct\n[ ] Output shown in comments where helpful\n[ ] Uses realistic values (not foo/bar/baz)\n[ ] No placeholder values left (TODO, FIXME, xxx)\n[ ] Self-contained (doesn't depend on undefined variables)\n    OR setup is shown/referenced\n[ ] Covers the common case (not just edge 
cases)\n\n\nScore each example 0-3:\n\n0: Broken or misleading\n1: Works but minimal (no output, no context)\n2: Good (correct, has output or explanation)\n3: Excellent (copy-pasteable, realistic, covers edge case)\nSection organization\nORGANIZATION SCORING:\n\n[2] Organized by task/scenario (not by abstract concept)\n    GOOD: \"## Encode and Decode\" → \"## Inspect Characters\" → \"## Convert Formats\"\n    BAD:  \"## Theory\" → \"## Types\" → \"## Advanced\"\n\n[2] Most common operations come first\n    GOOD: Basic usage → Variations → Advanced → Edge cases\n    BAD:  Configuration → Theory → Finally the basic usage\n\n[1] Sections are self-contained (can be used independently)\n\n[1] Consistent depth (not mixing h2 with h4 randomly)\n\nScore: __/6\n\nCross-platform accuracy\nPLATFORM CHECKLIST:\n\n[ ] macOS differences noted where relevant\n    (sed -i '' vs sed -i, brew vs apt, BSD vs GNU flags)\n[ ] Linux distro variations noted (apt vs yum vs pacman)\n[ ] Windows compatibility addressed if os includes \"win32\"\n[ ] Tool version assumptions stated (Docker v2 syntax, Python 3.x)\n\nStep 4: Actionability Assessment\n\nThe core question: can an agent follow these instructions to produce correct results?\n\nACTIONABILITY SCORING:\n\n[3] Instructions are imperative (\"Run X\", \"Create Y\")\n    NOT: \"You might consider...\" or \"It's recommended to...\"\n\n[3] Steps are ordered logically (prerequisites before actions)\n\n[2] Error cases addressed (what to do when something fails)\n\n[2] Output/result described (how to verify it worked)\n\nScore: __/10\n\nStep 5: Tips Section Quality\nTIPS SCORING:\n\n[2] 5-10 tips present\n\n[2] Tips are non-obvious (not \"read the documentation\")\n    GOOD: \"The number one Makefile bug: spaces instead of tabs\"\n    BAD:  \"Make sure to test your code\"\n\n[2] Tips are specific and actionable\n    GOOD: \"Use flock to prevent overlapping cron runs\"\n    BAD:  \"Be careful with concurrent execution\"\n\n[1] No tips 
contradict the main content\n\n[1] Tips cover gotchas/footguns specific to this topic\n\nScore: __/8\n\nScoring Summary\nSKILL REVIEW SCORECARD\n═══════════════════════════════════════\nSkill: [name]\nReviewer: [agent/human]\nDate: [date]\n\nCategory              Score    Max\n─────────────────────────────────────\nStructure             __       11\nDescription           __        8\nMetadata              __        4\nExample density       __        3*\nExample quality       __        3*\nOrganization          __        6\nActionability         __       10\nTips                  __        8\n─────────────────────────────────────\nTOTAL                 __       53+\n\n* Example density and quality are per-sample,\n  not summed. Use the average across all examples.\n\nRATING:\n  45+  Excellent — publish-ready\n  35-44 Good — minor improvements needed\n  25-34 Fair — significant gaps to address\n  < 25  Poor — needs major rework\n\nVERDICT: [PUBLISH / REVISE / REWORK]\n\nCommon Defects\nCritical (blocks publishing)\nDEFECT: Invalid frontmatter\nDETECT: YAML parse error, missing required fields\nFIX:    Validate YAML, ensure name/description/metadata all present\n\nDEFECT: Broken code examples\nDETECT: Syntax errors, undefined variables, wrong flags\nFIX:    Test every command in a clean environment\n\nDEFECT: Wrong tool requirements\nDETECT: metadata.requires lists tools not used in content, or omits tools that are used\nFIX:    Grep content for command names, update requires to match\n\nDEFECT: Misleading description\nDETECT: Description promises coverage the content doesn't deliver\nFIX:    Align description with actual content, or add missing content\n\nMajor (should fix before publishing)\nDEFECT: No \"When to Use\" section\nIMPACT: Agent doesn't know when to activate the skill\nFIX:    Add 4-8 bullet points describing trigger scenarios\n\nDEFECT: Text walls without examples\nDETECT: Any section > 10 lines with no code block\nFIX:    Add concrete examples for 
every concept described\n\nDEFECT: Examples missing language tags\nDETECT: ``` without language identifier\nFIX:    Add bash, python, javascript, yaml, etc. to every code fence\n\nDEFECT: No Tips section\nIMPACT: Missing the distilled expertise that makes a skill valuable\nFIX:    Add 5-10 non-obvious, actionable tips\n\nDEFECT: Abstract organization\nDETECT: Sections named \"Theory\", \"Overview\", \"Background\", \"Introduction\"\nFIX:    Reorganize by task/operation: what the user is trying to DO\n\nMinor (nice to fix)\nDEFECT: Placeholder values\nDETECT: foo, bar, baz, example.com, 1.2.3.4, TODO, FIXME\nFIX:    Replace with realistic values (myapp, api.example.com, 192.168.1.100)\n\nDEFECT: Inconsistent formatting\nDETECT: Mixed heading levels, inconsistent code block style\nFIX:    Standardize heading hierarchy and formatting\n\nDEFECT: Missing cross-references\nDETECT: Mentions tools/concepts covered by other skills without referencing them\nFIX:    Add \"See the X skill for more on Y\" notes\n\nDEFECT: Outdated commands\nDETECT: docker-compose (v1), python (not python3), npm -g without npx alternative\nFIX:    Update to current tool versions and syntax\n\nComparative Review\n\nWhen comparing skills in the same category:\n\nCOMPARATIVE CRITERIA:\n\n1. Coverage breadth\n   Which skill covers more use cases?\n\n2. Example quality\n   Which has more runnable, realistic examples?\n\n3. Depth on common operations\n   Which handles the 80% case better?\n\n4. Edge case coverage\n   Which addresses more gotchas and failure modes?\n\n5. Cross-platform support\n   Which works across more environments?\n\n6. 
Freshness\n   Which uses current tool versions and syntax?\n\nWINNER: [skill A / skill B / tie]\nREASON: [1-2 sentence justification]\n\nQuick Review Template\n\nFor a fast review when you don't need full scoring:\n\n## Quick Review: [skill-name]\n\n**Structure**: [OK / Issues: ...]\n**Description**: [Strong / Weak: reason]\n**Examples**: [X code blocks across Y lines — density OK/low/high]\n**Actionability**: [Agent can/cannot follow these instructions because...]\n**Top defect**: [The single most impactful thing to fix]\n**Verdict**: [PUBLISH / REVISE / REWORK]\n\nReview Workflow\nReviewing your own skill before publishing\n# 1. Validate frontmatter\nhead -20 skills/my-skill/SKILL.md\n# Visually confirm YAML is valid\n\n# 2. Count code blocks\ngrep -c '```' skills/my-skill/SKILL.md\n# Divide total lines by this number for density\n\n# 3. Check for placeholders\ngrep -n -i 'todo\\|fixme\\|xxx\\|foo\\|bar\\|baz' skills/my-skill/SKILL.md\n\n# 4. Check for missing language tags\ngrep -n '^```$' skills/my-skill/SKILL.md\n# Every code fence should have a language tag — bare ``` is a defect\n\n# 5. Verify tool requirements match content\n# Extract requires from frontmatter, then grep for each tool in content\n\n# 6. Test commands (sample 3-5 from the skill)\n# Run them in a clean shell to verify they work\n\n# 7. Run the scorecard mentally or in a file\n# Target: 35+ for good, 45+ for excellent\n\nReviewing a registry skill after installing\n# Install the skill\nnpx molthub@latest install skill-name\n\n# Read it\ncat skills/skill-name/SKILL.md\n\n# Run the quick review template\n# If score < 25, consider uninstalling and finding an alternative\n\nTips\nThe description field accounts for more real-world impact than all other fields combined. A perfect skill with a bad description will never be found via search.\nCount code blocks as your first quality signal. 
Skills with fewer than 8 code blocks are almost always too abstract to be useful.\nTest 3-5 commands from the skill in a clean environment. If more than one fails, the skill wasn't tested before publishing.\n\"Organized by task\" vs. \"organized by concept\" is the single biggest structural quality differentiator. Good skills answer \"how do I do X?\" — bad skills explain \"what is X?\"\nA skill with great tips but weak examples is better than one with thorough examples but no tips. Tips encode expertise that examples alone don't convey.\nCheck the requires.anyBins against what the skill actually uses. A common defect is listing bash (which everything has) instead of the actual tools like docker, curl, or jq.\nShort skills (< 150 lines) usually aren't worth publishing — they don't provide enough value over a quick web search. If your skill is short, it might be better as a section in a larger skill.\nThe best skills are ones you'd bookmark yourself. If you wouldn't use it, don't publish it."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/gitgoodordietrying/skill-reviewer",
    "publisherUrl": "https://clawhub.ai/gitgoodordietrying/skill-reviewer",
    "owner": "gitgoodordietrying",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/skill-reviewer",
    "downloadUrl": "https://openagent3.xyz/downloads/skill-reviewer",
    "agentUrl": "https://openagent3.xyz/skills/skill-reviewer/agent",
    "manifestUrl": "https://openagent3.xyz/skills/skill-reviewer/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/skill-reviewer/agent.md"
  }
}