{
  "schemaVersion": "1.0",
  "item": {
    "slug": "writing-skills",
    "name": "Writing Skills",
    "source": "tencent",
    "type": "skill",
    "category": "内容创作",
    "sourceUrl": "https://clawhub.ai/zlc000190/writing-skills",
    "canonicalUrl": "https://clawhub.ai/zlc000190/writing-skills",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/writing-skills",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=writing-skills",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "anthropic-best-practices.md",
      "examples/CLAUDE_MD_TESTING.md",
      "persuasion-principles.md",
      "render-graphs.js",
      "testing-skills-with-subagents.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=writing-skills",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=writing-skills",
        "contentDisposition": "attachment; filename=\"writing-skills-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/writing-skills"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/writing-skills",
    "agentPageUrl": "https://openagent3.xyz/skills/writing-skills/agent",
    "manifestUrl": "https://openagent3.xyz/skills/writing-skills/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/writing-skills/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Overview",
        "body": "Writing skills IS Test-Driven Development applied to process documentation.\n\nPersonal skills live in agent-specific directories (~/.claude/skills for Claude Code, ~/.agents/skills/ for Codex)\n\nYou write test cases (pressure scenarios with subagents), watch them fail (baseline behavior), write the skill (documentation), watch tests pass (agents comply), and refactor (close loopholes).\n\nCore principle: If you didn't watch an agent fail without the skill, you don't know if the skill teaches the right thing.\n\nREQUIRED BACKGROUND: You MUST understand superpowers:test-driven-development before using this skill. That skill defines the fundamental RED-GREEN-REFACTOR cycle. This skill adapts TDD to documentation.\n\nOfficial guidance: For Anthropic's official skill authoring best practices, see anthropic-best-practices.md. This document provides additional patterns and guidelines that complement the TDD-focused approach in this skill."
      },
      {
        "title": "What is a Skill?",
        "body": "A skill is a reference guide for proven techniques, patterns, or tools. Skills help future Claude instances find and apply effective approaches.\n\nSkills are: Reusable techniques, patterns, tools, reference guides\n\nSkills are NOT: Narratives about how you solved a problem once"
      },
      {
        "title": "TDD Mapping for Skills",
        "body": "TDD ConceptSkill CreationTest casePressure scenario with subagentProduction codeSkill document (SKILL.md)Test fails (RED)Agent violates rule without skill (baseline)Test passes (GREEN)Agent complies with skill presentRefactorClose loopholes while maintaining complianceWrite test firstRun baseline scenario BEFORE writing skillWatch it failDocument exact rationalizations agent usesMinimal codeWrite skill addressing those specific violationsWatch it passVerify agent now compliesRefactor cycleFind new rationalizations → plug → re-verify\n\nThe entire skill creation process follows RED-GREEN-REFACTOR."
      },
      {
        "title": "When to Create a Skill",
        "body": "Create when:\n\nTechnique wasn't intuitively obvious to you\nYou'd reference this again across projects\nPattern applies broadly (not project-specific)\nOthers would benefit\n\nDon't create for:\n\nOne-off solutions\nStandard practices well-documented elsewhere\nProject-specific conventions (put in CLAUDE.md)\nMechanical constraints (if it's enforceable with regex/validation, automate it—save documentation for judgment calls)"
      },
      {
        "title": "Technique",
        "body": "Concrete method with steps to follow (condition-based-waiting, root-cause-tracing)"
      },
      {
        "title": "Pattern",
        "body": "Way of thinking about problems (flatten-with-flags, test-invariants)"
      },
      {
        "title": "Reference",
        "body": "API docs, syntax guides, tool documentation (office docs)"
      },
      {
        "title": "Directory Structure",
        "body": "skills/\n  skill-name/\n    SKILL.md              # Main reference (required)\n    supporting-file.*     # Only if needed\n\nFlat namespace - all skills in one searchable namespace\n\nSeparate files for:\n\nHeavy reference (100+ lines) - API docs, comprehensive syntax\nReusable tools - Scripts, utilities, templates\n\nKeep inline:\n\nPrinciples and concepts\nCode patterns (< 50 lines)\nEverything else"
      },
      {
        "title": "SKILL.md Structure",
        "body": "Frontmatter (YAML):\n\nOnly two fields supported: name and description\nMax 1024 characters total\nname: Use letters, numbers, and hyphens only (no parentheses, special chars)\ndescription: Third-person, describes ONLY when to use (NOT what it does)\n\nStart with \"Use when...\" to focus on triggering conditions\nInclude specific symptoms, situations, and contexts\nNEVER summarize the skill's process or workflow (see CSO section for why)\nKeep under 500 characters if possible\n\n---\nname: Skill-Name-With-Hyphens\ndescription: Use when [specific triggering conditions and symptoms]\n---\n\n# Skill Name\n\n## Overview\nWhat is this? Core principle in 1-2 sentences.\n\n## When to Use\n[Small inline flowchart IF decision non-obvious]\n\nBullet list with SYMPTOMS and use cases\nWhen NOT to use\n\n## Core Pattern (for techniques/patterns)\nBefore/after code comparison\n\n## Quick Reference\nTable or bullets for scanning common operations\n\n## Implementation\nInline code for simple patterns\nLink to file for heavy reference or reusable tools\n\n## Common Mistakes\nWhat goes wrong + fixes\n\n## Real-World Impact (optional)\nConcrete results"
      },
      {
        "title": "Claude Search Optimization (CSO)",
        "body": "Critical for discovery: Future Claude needs to FIND your skill"
      },
      {
        "title": "1. Rich Description Field",
        "body": "Purpose: Claude reads description to decide which skills to load for a given task. Make it answer: \"Should I read this skill right now?\"\n\nFormat: Start with \"Use when...\" to focus on triggering conditions\n\nCRITICAL: Description = When to Use, NOT What the Skill Does\n\nThe description should ONLY describe triggering conditions. Do NOT summarize the skill's process or workflow in the description.\n\nWhy this matters: Testing revealed that when a description summarizes the skill's workflow, Claude may follow the description instead of reading the full skill content. A description saying \"code review between tasks\" caused Claude to do ONE review, even though the skill's flowchart clearly showed TWO reviews (spec compliance then code quality).\n\nWhen the description was changed to just \"Use when executing implementation plans with independent tasks\" (no workflow summary), Claude correctly read the flowchart and followed the two-stage review process.\n\nThe trap: Descriptions that summarize workflow create a shortcut Claude will take. The skill body becomes documentation Claude skips.\n\n# ❌ BAD: Summarizes workflow - Claude may follow this instead of reading skill\ndescription: Use when executing plans - dispatches subagent per task with code review between tasks\n\n# ❌ BAD: Too much process detail\ndescription: Use for TDD - write test first, watch it fail, write minimal code, refactor\n\n# ✅ GOOD: Just triggering conditions, no workflow summary\ndescription: Use when executing implementation plans with independent tasks in the current session\n\n# ✅ GOOD: Triggering conditions only\ndescription: Use when implementing any feature or bugfix, before writing implementation code\n\nContent:\n\nUse concrete triggers, symptoms, and situations that signal this skill applies\nDescribe the problem (race conditions, inconsistent behavior) not language-specific symptoms (setTimeout, sleep)\nKeep triggers technology-agnostic unless the skill itself is technology-specific\nIf skill is technology-specific, make that explicit in the trigger\nWrite in third person (injected into system prompt)\nNEVER summarize the skill's process or workflow\n\n# ❌ BAD: Too abstract, vague, doesn't include when to use\ndescription: For async testing\n\n# ❌ BAD: First person\ndescription: I can help you with async tests when they're flaky\n\n# ❌ BAD: Mentions technology but skill isn't specific to it\ndescription: Use when tests use setTimeout/sleep and are flaky\n\n# ✅ GOOD: Starts with \"Use when\", describes problem, no workflow\ndescription: Use when tests have race conditions, timing dependencies, or pass/fail inconsistently\n\n# ✅ GOOD: Technology-specific skill with explicit trigger\ndescription: Use when using React Router and handling authentication redirects"
      },
      {
        "title": "2. Keyword Coverage",
        "body": "Use words Claude would search for:\n\nError messages: \"Hook timed out\", \"ENOTEMPTY\", \"race condition\"\nSymptoms: \"flaky\", \"hanging\", \"zombie\", \"pollution\"\nSynonyms: \"timeout/hang/freeze\", \"cleanup/teardown/afterEach\"\nTools: Actual commands, library names, file types"
      },
      {
        "title": "3. Descriptive Naming",
        "body": "Use active voice, verb-first:\n\n✅ creating-skills not skill-creation\n✅ condition-based-waiting not async-test-helpers"
      },
      {
        "title": "4. Token Efficiency (Critical)",
        "body": "Problem: getting-started and frequently-referenced skills load into EVERY conversation. Every token counts.\n\nTarget word counts:\n\ngetting-started workflows: <150 words each\nFrequently-loaded skills: <200 words total\nOther skills: <500 words (still be concise)\n\nTechniques:\n\nMove details to tool help:\n\n# ❌ BAD: Document all flags in SKILL.md\nsearch-conversations supports --text, --both, --after DATE, --before DATE, --limit N\n\n# ✅ GOOD: Reference --help\nsearch-conversations supports multiple modes and filters. Run --help for details.\n\nUse cross-references:\n\n# ❌ BAD: Repeat workflow details\nWhen searching, dispatch subagent with template...\n[20 lines of repeated instructions]\n\n# ✅ GOOD: Reference other skill\nAlways use subagents (50-100x context savings). REQUIRED: Use [other-skill-name] for workflow.\n\nCompress examples:\n\n# ❌ BAD: Verbose example (42 words)\nyour human partner: \"How did we handle authentication errors in React Router before?\"\nYou: I'll search past conversations for React Router authentication patterns.\n[Dispatch subagent with search query: \"React Router authentication error handling 401\"]\n\n# ✅ GOOD: Minimal example (20 words)\nPartner: \"How did we handle auth errors in React Router?\"\nYou: Searching...\n[Dispatch subagent → synthesis]\n\nEliminate redundancy:\n\nDon't repeat what's in cross-referenced skills\nDon't explain what's obvious from command\nDon't include multiple examples of same pattern\n\nVerification:\n\nwc -w skills/path/SKILL.md\n# getting-started workflows: aim for <150 each\n# Other frequently-loaded: aim for <200 total\n\nName by what you DO or core insight:\n\n✅ condition-based-waiting > async-test-helpers\n✅ using-skills not skill-usage\n✅ flatten-with-flags > data-structure-refactoring\n✅ root-cause-tracing > debugging-techniques\n\nGerunds (-ing) work well for processes:\n\ncreating-skills, testing-skills, debugging-with-logs\nActive, describes the action you're taking"
      },
      {
        "title": "4. Cross-Referencing Other Skills",
        "body": "When writing documentation that references other skills:\n\nUse skill name only, with explicit requirement markers:\n\n✅ Good: **REQUIRED SUB-SKILL:** Use superpowers:test-driven-development\n✅ Good: **REQUIRED BACKGROUND:** You MUST understand superpowers:systematic-debugging\n❌ Bad: See skills/testing/test-driven-development (unclear if required)\n❌ Bad: @skills/testing/test-driven-development/SKILL.md (force-loads, burns context)\n\nWhy no @ links: @ syntax force-loads files immediately, consuming 200k+ context before you need them."
      },
      {
        "title": "Flowchart Usage",
        "body": "digraph when_flowchart {\n    \"Need to show information?\" [shape=diamond];\n    \"Decision where I might go wrong?\" [shape=diamond];\n    \"Use markdown\" [shape=box];\n    \"Small inline flowchart\" [shape=box];\n\n    \"Need to show information?\" -> \"Decision where I might go wrong?\" [label=\"yes\"];\n    \"Decision where I might go wrong?\" -> \"Small inline flowchart\" [label=\"yes\"];\n    \"Decision where I might go wrong?\" -> \"Use markdown\" [label=\"no\"];\n}\n\nUse flowcharts ONLY for:\n\nNon-obvious decision points\nProcess loops where you might stop too early\n\"When to use A vs B\" decisions\n\nNever use flowcharts for:\n\nReference material → Tables, lists\nCode examples → Markdown blocks\nLinear instructions → Numbered lists\nLabels without semantic meaning (step1, helper2)\n\nSee @graphviz-conventions.dot for graphviz style rules.\n\nVisualizing for your human partner: Use render-graphs.js in this directory to render a skill's flowcharts to SVG:\n\n./render-graphs.js ../some-skill           # Each diagram separately\n./render-graphs.js ../some-skill --combine # All diagrams in one SVG"
      },
      {
        "title": "Code Examples",
        "body": "One excellent example beats many mediocre ones\n\nChoose most relevant language:\n\nTesting techniques → TypeScript/JavaScript\nSystem debugging → Shell/Python\nData processing → Python\n\nGood example:\n\nComplete and runnable\nWell-commented explaining WHY\nFrom real scenario\nShows pattern clearly\nReady to adapt (not generic template)\n\nDon't:\n\nImplement in 5+ languages\nCreate fill-in-the-blank templates\nWrite contrived examples\n\nYou're good at porting - one great example is enough."
      },
      {
        "title": "Self-Contained Skill",
        "body": "defense-in-depth/\n  SKILL.md    # Everything inline\n\nWhen: All content fits, no heavy reference needed"
      },
      {
        "title": "Skill with Reusable Tool",
        "body": "condition-based-waiting/\n  SKILL.md    # Overview + patterns\n  example.ts  # Working helpers to adapt\n\nWhen: Tool is reusable code, not just narrative"
      },
      {
        "title": "Skill with Heavy Reference",
        "body": "pptx/\n  SKILL.md       # Overview + workflows\n  pptxgenjs.md   # 600 lines API reference\n  ooxml.md       # 500 lines XML structure\n  scripts/       # Executable tools\n\nWhen: Reference material too large for inline"
      },
      {
        "title": "The Iron Law (Same as TDD)",
        "body": "NO SKILL WITHOUT A FAILING TEST FIRST\n\nThis applies to NEW skills AND EDITS to existing skills.\n\nWrite skill before testing? Delete it. Start over.\nEdit skill without testing? Same violation.\n\nNo exceptions:\n\nNot for \"simple additions\"\nNot for \"just adding a section\"\nNot for \"documentation updates\"\nDon't keep untested changes as \"reference\"\nDon't \"adapt\" while running tests\nDelete means delete\n\nREQUIRED BACKGROUND: The superpowers:test-driven-development skill explains why this matters. Same principles apply to documentation."
      },
      {
        "title": "Testing All Skill Types",
        "body": "Different skill types need different test approaches:"
      },
      {
        "title": "Discipline-Enforcing Skills (rules/requirements)",
        "body": "Examples: TDD, verification-before-completion, designing-before-coding\n\nTest with:\n\nAcademic questions: Do they understand the rules?\nPressure scenarios: Do they comply under stress?\nMultiple pressures combined: time + sunk cost + exhaustion\nIdentify rationalizations and add explicit counters\n\nSuccess criteria: Agent follows rule under maximum pressure"
      },
      {
        "title": "Technique Skills (how-to guides)",
        "body": "Examples: condition-based-waiting, root-cause-tracing, defensive-programming\n\nTest with:\n\nApplication scenarios: Can they apply the technique correctly?\nVariation scenarios: Do they handle edge cases?\nMissing information tests: Do instructions have gaps?\n\nSuccess criteria: Agent successfully applies technique to new scenario"
      },
      {
        "title": "Pattern Skills (mental models)",
        "body": "Examples: reducing-complexity, information-hiding concepts\n\nTest with:\n\nRecognition scenarios: Do they recognize when pattern applies?\nApplication scenarios: Can they use the mental model?\nCounter-examples: Do they know when NOT to apply?\n\nSuccess criteria: Agent correctly identifies when/how to apply pattern"
      },
      {
        "title": "Reference Skills (documentation/APIs)",
        "body": "Examples: API documentation, command references, library guides\n\nTest with:\n\nRetrieval scenarios: Can they find the right information?\nApplication scenarios: Can they use what they found correctly?\nGap testing: Are common use cases covered?\n\nSuccess criteria: Agent finds and correctly applies reference information"
      },
      {
        "title": "Common Rationalizations for Skipping Testing",
        "body": "ExcuseReality\"Skill is obviously clear\"Clear to you ≠ clear to other agents. Test it.\"It's just a reference\"References can have gaps, unclear sections. Test retrieval.\"Testing is overkill\"Untested skills have issues. Always. 15 min testing saves hours.\"I'll test if problems emerge\"Problems = agents can't use skill. Test BEFORE deploying.\"Too tedious to test\"Testing is less tedious than debugging bad skill in production.\"I'm confident it's good\"Overconfidence guarantees issues. Test anyway.\"Academic review is enough\"Reading ≠ using. Test application scenarios.\"No time to test\"Deploying untested skill wastes more time fixing it later.\n\nAll of these mean: Test before deploying. No exceptions."
      },
      {
        "title": "Bulletproofing Skills Against Rationalization",
        "body": "Skills that enforce discipline (like TDD) need to resist rationalization. Agents are smart and will find loopholes when under pressure.\n\nPsychology note: Understanding WHY persuasion techniques work helps you apply them systematically. See persuasion-principles.md for research foundation (Cialdini, 2021; Meincke et al., 2025) on authority, commitment, scarcity, social proof, and unity principles."
      },
      {
        "title": "Close Every Loophole Explicitly",
        "body": "Don't just state the rule - forbid specific workarounds:\n\nNo exceptions:\n\nDon't keep it as \"reference\"\nDon't \"adapt\" it while writing tests\nDon't look at it\nDelete means delete\n\n</Good>\n\n### Address \"Spirit vs Letter\" Arguments\n\nAdd foundational principle early:\n\n```markdown\n**Violating the letter of the rules is violating the spirit of the rules.**\n\nThis cuts off entire class of \"I'm following the spirit\" rationalizations."
      },
      {
        "title": "Build Rationalization Table",
        "body": "Capture rationalizations from baseline testing (see Testing section below). Every excuse agents make goes in the table:\n\n| Excuse | Reality |\n|--------|---------|\n| \"Too simple to test\" | Simple code breaks. Test takes 30 seconds. |\n| \"I'll test after\" | Tests passing immediately prove nothing. |\n| \"Tests after achieve same goals\" | Tests-after = \"what does this do?\" Tests-first = \"what should this do?\" |"
      },
      {
        "title": "Create Red Flags List",
        "body": "Make it easy for agents to self-check when rationalizing:\n\n## Red Flags - STOP and Start Over\n\n- Code before test\n- \"I already manually tested it\"\n- \"Tests after achieve the same purpose\"\n- \"It's about spirit not ritual\"\n- \"This is different because...\"\n\n**All of these mean: Delete code. Start over with TDD.**"
      },
      {
        "title": "Update CSO for Violation Symptoms",
        "body": "Add to description: symptoms of when you're ABOUT to violate the rule:\n\ndescription: use when implementing any feature or bugfix, before writing implementation code"
      },
      {
        "title": "RED-GREEN-REFACTOR for Skills",
        "body": "Follow the TDD cycle:"
      },
      {
        "title": "RED: Write Failing Test (Baseline)",
        "body": "Run pressure scenario with subagent WITHOUT the skill. Document exact behavior:\n\nWhat choices did they make?\nWhat rationalizations did they use (verbatim)?\nWhich pressures triggered violations?\n\nThis is \"watch the test fail\" - you must see what agents naturally do before writing the skill."
      },
      {
        "title": "GREEN: Write Minimal Skill",
        "body": "Write skill that addresses those specific rationalizations. Don't add extra content for hypothetical cases.\n\nRun same scenarios WITH skill. Agent should now comply."
      },
      {
        "title": "REFACTOR: Close Loopholes",
        "body": "Agent found new rationalization? Add explicit counter. Re-test until bulletproof.\n\nTesting methodology: See @testing-skills-with-subagents.md for the complete testing methodology:\n\nHow to write pressure scenarios\nPressure types (time, sunk cost, authority, exhaustion)\nPlugging holes systematically\nMeta-testing techniques"
      },
      {
        "title": "❌ Narrative Example",
        "body": "\"In session 2025-10-03, we found empty projectDir caused...\"\nWhy bad: Too specific, not reusable"
      },
      {
        "title": "❌ Multi-Language Dilution",
        "body": "example-js.js, example-py.py, example-go.go\nWhy bad: Mediocre quality, maintenance burden"
      },
      {
        "title": "❌ Code in Flowcharts",
        "body": "step1 [label=\"import fs\"];\nstep2 [label=\"read file\"];\n\nWhy bad: Can't copy-paste, hard to read"
      },
      {
        "title": "❌ Generic Labels",
        "body": "helper1, helper2, step3, pattern4\nWhy bad: Labels should have semantic meaning"
      },
      {
        "title": "STOP: Before Moving to Next Skill",
        "body": "After writing ANY skill, you MUST STOP and complete the deployment process.\n\nDo NOT:\n\nCreate multiple skills in batch without testing each\nMove to next skill before current one is verified\nSkip testing because \"batching is more efficient\"\n\nThe deployment checklist below is MANDATORY for EACH skill.\n\nDeploying untested skills = deploying untested code. It's a violation of quality standards."
      },
      {
        "title": "Skill Creation Checklist (TDD Adapted)",
        "body": "IMPORTANT: Use TodoWrite to create todos for EACH checklist item below.\n\nRED Phase - Write Failing Test:\n\nCreate pressure scenarios (3+ combined pressures for discipline skills)\n Run scenarios WITHOUT skill - document baseline behavior verbatim\n Identify patterns in rationalizations/failures\n\nGREEN Phase - Write Minimal Skill:\n\nName uses only letters, numbers, hyphens (no parentheses/special chars)\n YAML frontmatter with only name and description (max 1024 chars)\n Description starts with \"Use when...\" and includes specific triggers/symptoms\n Description written in third person\n Keywords throughout for search (errors, symptoms, tools)\n Clear overview with core principle\n Address specific baseline failures identified in RED\n Code inline OR link to separate file\n One excellent example (not multi-language)\n Run scenarios WITH skill - verify agents now comply\n\nREFACTOR Phase - Close Loopholes:\n\nIdentify NEW rationalizations from testing\n Add explicit counters (if discipline skill)\n Build rationalization table from all test iterations\n Create red flags list\n Re-test until bulletproof\n\nQuality Checks:\n\nSmall flowchart only if decision non-obvious\n Quick reference table\n Common mistakes section\n No narrative storytelling\n Supporting files only for tools or heavy reference\n\nDeployment:\n\nCommit skill to git and push to your fork (if configured)\n Consider contributing back via PR (if broadly useful)"
      },
      {
        "title": "Discovery Workflow",
        "body": "How future Claude finds your skill:\n\nEncounters problem (\"tests are flaky\")\nFinds SKILL (description matches)\nScans overview (is this relevant?)\nReads patterns (quick reference table)\nLoads example (only when implementing)\n\nOptimize for this flow - put searchable terms early and often."
      },
      {
        "title": "The Bottom Line",
        "body": "Creating skills IS TDD for process documentation.\n\nSame Iron Law: No skill without failing test first.\nSame cycle: RED (baseline) → GREEN (write skill) → REFACTOR (close loopholes).\nSame benefits: Better quality, fewer surprises, bulletproof results.\n\nIf you follow TDD for code, follow it for skills. It's the same discipline applied to documentation."
      }
    ],
    "body": "Writing Skills\nOverview\n\nWriting skills IS Test-Driven Development applied to process documentation.\n\nPersonal skills live in agent-specific directories (~/.claude/skills for Claude Code, ~/.agents/skills/ for Codex)\n\nYou write test cases (pressure scenarios with subagents), watch them fail (baseline behavior), write the skill (documentation), watch tests pass (agents comply), and refactor (close loopholes).\n\nCore principle: If you didn't watch an agent fail without the skill, you don't know if the skill teaches the right thing.\n\nREQUIRED BACKGROUND: You MUST understand superpowers:test-driven-development before using this skill. That skill defines the fundamental RED-GREEN-REFACTOR cycle. This skill adapts TDD to documentation.\n\nOfficial guidance: For Anthropic's official skill authoring best practices, see anthropic-best-practices.md. This document provides additional patterns and guidelines that complement the TDD-focused approach in this skill.\n\nWhat is a Skill?\n\nA skill is a reference guide for proven techniques, patterns, or tools. 
Skills help future Claude instances find and apply effective approaches.\n\nSkills are: Reusable techniques, patterns, tools, reference guides\n\nSkills are NOT: Narratives about how you solved a problem once\n\nTDD Mapping for Skills\nTDD Concept\tSkill Creation\nTest case\tPressure scenario with subagent\nProduction code\tSkill document (SKILL.md)\nTest fails (RED)\tAgent violates rule without skill (baseline)\nTest passes (GREEN)\tAgent complies with skill present\nRefactor\tClose loopholes while maintaining compliance\nWrite test first\tRun baseline scenario BEFORE writing skill\nWatch it fail\tDocument exact rationalizations agent uses\nMinimal code\tWrite skill addressing those specific violations\nWatch it pass\tVerify agent now complies\nRefactor cycle\tFind new rationalizations → plug → re-verify\n\nThe entire skill creation process follows RED-GREEN-REFACTOR.\n\nWhen to Create a Skill\n\nCreate when:\n\nTechnique wasn't intuitively obvious to you\nYou'd reference this again across projects\nPattern applies broadly (not project-specific)\nOthers would benefit\n\nDon't create for:\n\nOne-off solutions\nStandard practices well-documented elsewhere\nProject-specific conventions (put in CLAUDE.md)\nMechanical constraints (if it's enforceable with regex/validation, automate it—save documentation for judgment calls)\nSkill Types\nTechnique\n\nConcrete method with steps to follow (condition-based-waiting, root-cause-tracing)\n\nPattern\n\nWay of thinking about problems (flatten-with-flags, test-invariants)\n\nReference\n\nAPI docs, syntax guides, tool documentation (office docs)\n\nDirectory Structure\nskills/\n  skill-name/\n    SKILL.md              # Main reference (required)\n    supporting-file.*     # Only if needed\n\n\nFlat namespace - all skills in one searchable namespace\n\nSeparate files for:\n\nHeavy reference (100+ lines) - API docs, comprehensive syntax\nReusable tools - Scripts, utilities, templates\n\nKeep inline:\n\nPrinciples and concepts\nCode 
patterns (< 50 lines)\nEverything else\nSKILL.md Structure\n\nFrontmatter (YAML):\n\nOnly two fields supported: name and description\nMax 1024 characters total\nname: Use letters, numbers, and hyphens only (no parentheses, special chars)\ndescription: Third-person, describes ONLY when to use (NOT what it does)\nStart with \"Use when...\" to focus on triggering conditions\nInclude specific symptoms, situations, and contexts\nNEVER summarize the skill's process or workflow (see CSO section for why)\nKeep under 500 characters if possible\n---\nname: Skill-Name-With-Hyphens\ndescription: Use when [specific triggering conditions and symptoms]\n---\n\n# Skill Name\n\n## Overview\nWhat is this? Core principle in 1-2 sentences.\n\n## When to Use\n[Small inline flowchart IF decision non-obvious]\n\nBullet list with SYMPTOMS and use cases\nWhen NOT to use\n\n## Core Pattern (for techniques/patterns)\nBefore/after code comparison\n\n## Quick Reference\nTable or bullets for scanning common operations\n\n## Implementation\nInline code for simple patterns\nLink to file for heavy reference or reusable tools\n\n## Common Mistakes\nWhat goes wrong + fixes\n\n## Real-World Impact (optional)\nConcrete results\n\nClaude Search Optimization (CSO)\n\nCritical for discovery: Future Claude needs to FIND your skill\n\n1. Rich Description Field\n\nPurpose: Claude reads description to decide which skills to load for a given task. Make it answer: \"Should I read this skill right now?\"\n\nFormat: Start with \"Use when...\" to focus on triggering conditions\n\nCRITICAL: Description = When to Use, NOT What the Skill Does\n\nThe description should ONLY describe triggering conditions. Do NOT summarize the skill's process or workflow in the description.\n\nWhy this matters: Testing revealed that when a description summarizes the skill's workflow, Claude may follow the description instead of reading the full skill content. 
A description saying \"code review between tasks\" caused Claude to do ONE review, even though the skill's flowchart clearly showed TWO reviews (spec compliance then code quality).\n\nWhen the description was changed to just \"Use when executing implementation plans with independent tasks\" (no workflow summary), Claude correctly read the flowchart and followed the two-stage review process.\n\nThe trap: Descriptions that summarize workflow create a shortcut Claude will take. The skill body becomes documentation Claude skips.\n\n# ❌ BAD: Summarizes workflow - Claude may follow this instead of reading skill\ndescription: Use when executing plans - dispatches subagent per task with code review between tasks\n\n# ❌ BAD: Too much process detail\ndescription: Use for TDD - write test first, watch it fail, write minimal code, refactor\n\n# ✅ GOOD: Just triggering conditions, no workflow summary\ndescription: Use when executing implementation plans with independent tasks in the current session\n\n# ✅ GOOD: Triggering conditions only\ndescription: Use when implementing any feature or bugfix, before writing implementation code\n\n\nContent:\n\nUse concrete triggers, symptoms, and situations that signal this skill applies\nDescribe the problem (race conditions, inconsistent behavior) not language-specific symptoms (setTimeout, sleep)\nKeep triggers technology-agnostic unless the skill itself is technology-specific\nIf skill is technology-specific, make that explicit in the trigger\nWrite in third person (injected into system prompt)\nNEVER summarize the skill's process or workflow\n# ❌ BAD: Too abstract, vague, doesn't include when to use\ndescription: For async testing\n\n# ❌ BAD: First person\ndescription: I can help you with async tests when they're flaky\n\n# ❌ BAD: Mentions technology but skill isn't specific to it\ndescription: Use when tests use setTimeout/sleep and are flaky\n\n# ✅ GOOD: Starts with \"Use when\", describes problem, no workflow\ndescription: Use when 
tests have race conditions, timing dependencies, or pass/fail inconsistently\n\n# ✅ GOOD: Technology-specific skill with explicit trigger\ndescription: Use when using React Router and handling authentication redirects\n\n2. Keyword Coverage\n\nUse words Claude would search for:\n\nError messages: \"Hook timed out\", \"ENOTEMPTY\", \"race condition\"\nSymptoms: \"flaky\", \"hanging\", \"zombie\", \"pollution\"\nSynonyms: \"timeout/hang/freeze\", \"cleanup/teardown/afterEach\"\nTools: Actual commands, library names, file types\n3. Descriptive Naming\n\nUse active voice, verb-first:\n\n✅ creating-skills not skill-creation\n✅ condition-based-waiting not async-test-helpers\n4. Token Efficiency (Critical)\n\nProblem: getting-started and frequently-referenced skills load into EVERY conversation. Every token counts.\n\nTarget word counts:\n\ngetting-started workflows: <150 words each\nFrequently-loaded skills: <200 words total\nOther skills: <500 words (still be concise)\n\nTechniques:\n\nMove details to tool help:\n\n# ❌ BAD: Document all flags in SKILL.md\nsearch-conversations supports --text, --both, --after DATE, --before DATE, --limit N\n\n# ✅ GOOD: Reference --help\nsearch-conversations supports multiple modes and filters. Run --help for details.\n\n\nUse cross-references:\n\n# ❌ BAD: Repeat workflow details\nWhen searching, dispatch subagent with template...\n[20 lines of repeated instructions]\n\n# ✅ GOOD: Reference other skill\nAlways use subagents (50-100x context savings). 
REQUIRED: Use [other-skill-name] for workflow.\n\n\nCompress examples:\n\n# ❌ BAD: Verbose example (42 words)\nyour human partner: \"How did we handle authentication errors in React Router before?\"\nYou: I'll search past conversations for React Router authentication patterns.\n[Dispatch subagent with search query: \"React Router authentication error handling 401\"]\n\n# ✅ GOOD: Minimal example (20 words)\nPartner: \"How did we handle auth errors in React Router?\"\nYou: Searching...\n[Dispatch subagent → synthesis]\n\n\nEliminate redundancy:\n\nDon't repeat what's in cross-referenced skills\nDon't explain what's obvious from command\nDon't include multiple examples of same pattern\n\nVerification:\n\nwc -w skills/path/SKILL.md\n# getting-started workflows: aim for <150 each\n# Other frequently-loaded: aim for <200 total\n\n\nName by what you DO or core insight:\n\n✅ condition-based-waiting > async-test-helpers\n✅ using-skills not skill-usage\n✅ flatten-with-flags > data-structure-refactoring\n✅ root-cause-tracing > debugging-techniques\n\nGerunds (-ing) work well for processes:\n\ncreating-skills, testing-skills, debugging-with-logs\nActive, describes the action you're taking\n5. 
Cross-Referencing Other Skills\n\nWhen writing documentation that references other skills:\n\nUse skill name only, with explicit requirement markers:\n\n✅ Good: **REQUIRED SUB-SKILL:** Use superpowers:test-driven-development\n✅ Good: **REQUIRED BACKGROUND:** You MUST understand superpowers:systematic-debugging\n❌ Bad: See skills/testing/test-driven-development (unclear if required)\n❌ Bad: @skills/testing/test-driven-development/SKILL.md (force-loads, burns context)\n\nWhy no @ links: @ syntax force-loads files immediately, consuming 200k+ context before you need them.\n\nFlowchart Usage\ndigraph when_flowchart {\n    \"Need to show information?\" [shape=diamond];\n    \"Decision where I might go wrong?\" [shape=diamond];\n    \"Use markdown\" [shape=box];\n    \"Small inline flowchart\" [shape=box];\n\n    \"Need to show information?\" -> \"Decision where I might go wrong?\" [label=\"yes\"];\n    \"Decision where I might go wrong?\" -> \"Small inline flowchart\" [label=\"yes\"];\n    \"Decision where I might go wrong?\" -> \"Use markdown\" [label=\"no\"];\n}\n\n\nUse flowcharts ONLY for:\n\nNon-obvious decision points\nProcess loops where you might stop too early\n\"When to use A vs B\" decisions\n\nNever use flowcharts for:\n\nReference material → Tables, lists\nCode examples → Markdown blocks\nLinear instructions → Numbered lists\nLabels without semantic meaning (step1, helper2)\n\nSee @graphviz-conventions.dot for graphviz style rules.\n\nVisualizing for your human partner: Use render-graphs.js in this directory to render a skill's flowcharts to SVG:\n\n./render-graphs.js ../some-skill           # Each diagram separately\n./render-graphs.js ../some-skill --combine # All diagrams in one SVG\n\nCode Examples\n\nOne excellent example beats many mediocre ones\n\nChoose most relevant language:\n\nTesting techniques → TypeScript/JavaScript\nSystem debugging → Shell/Python\nData processing → Python\n\nGood example:\n\nComplete and runnable\nWell-commented explaining 
WHY\nFrom real scenario\nShows pattern clearly\nReady to adapt (not generic template)\n\nDon't:\n\nImplement in 5+ languages\nCreate fill-in-the-blank templates\nWrite contrived examples\n\nYou're good at porting - one great example is enough.\n\nFile Organization\nSelf-Contained Skill\ndefense-in-depth/\n  SKILL.md    # Everything inline\n\n\nWhen: All content fits, no heavy reference needed\n\nSkill with Reusable Tool\ncondition-based-waiting/\n  SKILL.md    # Overview + patterns\n  example.ts  # Working helpers to adapt\n\n\nWhen: Tool is reusable code, not just narrative\n\nSkill with Heavy Reference\npptx/\n  SKILL.md       # Overview + workflows\n  pptxgenjs.md   # 600 lines API reference\n  ooxml.md       # 500 lines XML structure\n  scripts/       # Executable tools\n\n\nWhen: Reference material too large for inline\n\nThe Iron Law (Same as TDD)\nNO SKILL WITHOUT A FAILING TEST FIRST\n\n\nThis applies to NEW skills AND EDITS to existing skills.\n\nWrite skill before testing? Delete it. Start over. Edit skill without testing? Same violation.\n\nNo exceptions:\n\nNot for \"simple additions\"\nNot for \"just adding a section\"\nNot for \"documentation updates\"\nDon't keep untested changes as \"reference\"\nDon't \"adapt\" while running tests\nDelete means delete\n\nREQUIRED BACKGROUND: The superpowers:test-driven-development skill explains why this matters. 
Same principles apply to documentation.\n\nTesting All Skill Types\n\nDifferent skill types need different test approaches:\n\nDiscipline-Enforcing Skills (rules/requirements)\n\nExamples: TDD, verification-before-completion, designing-before-coding\n\nTest with:\n\nAcademic questions: Do they understand the rules?\nPressure scenarios: Do they comply under stress?\nMultiple pressures combined: time + sunk cost + exhaustion\nIdentify rationalizations and add explicit counters\n\nSuccess criteria: Agent follows rule under maximum pressure\n\nTechnique Skills (how-to guides)\n\nExamples: condition-based-waiting, root-cause-tracing, defensive-programming\n\nTest with:\n\nApplication scenarios: Can they apply the technique correctly?\nVariation scenarios: Do they handle edge cases?\nMissing information tests: Do instructions have gaps?\n\nSuccess criteria: Agent successfully applies technique to new scenario\n\nPattern Skills (mental models)\n\nExamples: reducing-complexity, information-hiding concepts\n\nTest with:\n\nRecognition scenarios: Do they recognize when pattern applies?\nApplication scenarios: Can they use the mental model?\nCounter-examples: Do they know when NOT to apply?\n\nSuccess criteria: Agent correctly identifies when/how to apply pattern\n\nReference Skills (documentation/APIs)\n\nExamples: API documentation, command references, library guides\n\nTest with:\n\nRetrieval scenarios: Can they find the right information?\nApplication scenarios: Can they use what they found correctly?\nGap testing: Are common use cases covered?\n\nSuccess criteria: Agent finds and correctly applies reference information\n\nCommon Rationalizations for Skipping Testing\nExcuse\tReality\n\"Skill is obviously clear\"\tClear to you ≠ clear to other agents. Test it.\n\"It's just a reference\"\tReferences can have gaps, unclear sections. Test retrieval.\n\"Testing is overkill\"\tUntested skills have issues. Always. 
15 min testing saves hours.\n\"I'll test if problems emerge\"\tProblems = agents can't use skill. Test BEFORE deploying.\n\"Too tedious to test\"\tTesting is less tedious than debugging bad skill in production.\n\"I'm confident it's good\"\tOverconfidence guarantees issues. Test anyway.\n\"Academic review is enough\"\tReading ≠ using. Test application scenarios.\n\"No time to test\"\tDeploying untested skill wastes more time fixing it later.\n\nAll of these mean: Test before deploying. No exceptions.\n\nBulletproofing Skills Against Rationalization\n\nSkills that enforce discipline (like TDD) need to resist rationalization. Agents are smart and will find loopholes when under pressure.\n\nPsychology note: Understanding WHY persuasion techniques work helps you apply them systematically. See persuasion-principles.md for research foundation (Cialdini, 2021; Meincke et al., 2025) on authority, commitment, scarcity, social proof, and unity principles.\n\nClose Every Loophole Explicitly\n\nDon't just state the rule - forbid specific workarounds:\n\n<Bad> ```markdown Write code before test? Delete it. ``` </Bad> <Good> ```markdown Write code before test? Delete it. Start over.\n\nNo exceptions:\n\nDon't keep it as \"reference\"\nDon't \"adapt\" it while writing tests\nDon't look at it\nDelete means delete\n</Good>\n\n### Address \"Spirit vs Letter\" Arguments\n\nAdd foundational principle early:\n\n```markdown\n**Violating the letter of the rules is violating the spirit of the rules.**\n\n\nThis cuts off entire class of \"I'm following the spirit\" rationalizations.\n\nBuild Rationalization Table\n\nCapture rationalizations from baseline testing (see Testing section below). Every excuse agents make goes in the table:\n\n| Excuse | Reality |\n|--------|---------|\n| \"Too simple to test\" | Simple code breaks. Test takes 30 seconds. |\n| \"I'll test after\" | Tests passing immediately prove nothing. 
|\n| \"Tests after achieve same goals\" | Tests-after = \"what does this do?\" Tests-first = \"what should this do?\" |\n\nCreate Red Flags List\n\nMake it easy for agents to self-check when rationalizing:\n\n## Red Flags - STOP and Start Over\n\n- Code before test\n- \"I already manually tested it\"\n- \"Tests after achieve the same purpose\"\n- \"It's about spirit not ritual\"\n- \"This is different because...\"\n\n**All of these mean: Delete code. Start over with TDD.**\n\nUpdate CSO for Violation Symptoms\n\nAdd to description: symptoms of when you're ABOUT to violate the rule:\n\ndescription: use when implementing any feature or bugfix, before writing implementation code\n\nRED-GREEN-REFACTOR for Skills\n\nFollow the TDD cycle:\n\nRED: Write Failing Test (Baseline)\n\nRun pressure scenario with subagent WITHOUT the skill. Document exact behavior:\n\nWhat choices did they make?\nWhat rationalizations did they use (verbatim)?\nWhich pressures triggered violations?\n\nThis is \"watch the test fail\" - you must see what agents naturally do before writing the skill.\n\nGREEN: Write Minimal Skill\n\nWrite skill that addresses those specific rationalizations. Don't add extra content for hypothetical cases.\n\nRun same scenarios WITH skill. Agent should now comply.\n\nREFACTOR: Close Loopholes\n\nAgent found new rationalization? Add explicit counter. 
Re-test until bulletproof.\n\nTesting methodology: See @testing-skills-with-subagents.md for the complete testing methodology:\n\nHow to write pressure scenarios\nPressure types (time, sunk cost, authority, exhaustion)\nPlugging holes systematically\nMeta-testing techniques\nAnti-Patterns\n❌ Narrative Example\n\n\"In session 2025-10-03, we found empty projectDir caused...\" Why bad: Too specific, not reusable\n\n❌ Multi-Language Dilution\n\nexample-js.js, example-py.py, example-go.go Why bad: Mediocre quality, maintenance burden\n\n❌ Code in Flowcharts\nstep1 [label=\"import fs\"];\nstep2 [label=\"read file\"];\n\n\nWhy bad: Can't copy-paste, hard to read\n\n❌ Generic Labels\n\nhelper1, helper2, step3, pattern4 Why bad: Labels should have semantic meaning\n\nSTOP: Before Moving to Next Skill\n\nAfter writing ANY skill, you MUST STOP and complete the deployment process.\n\nDo NOT:\n\nCreate multiple skills in batch without testing each\nMove to next skill before current one is verified\nSkip testing because \"batching is more efficient\"\n\nThe deployment checklist below is MANDATORY for EACH skill.\n\nDeploying untested skills = deploying untested code. 
It's a violation of quality standards.\n\nSkill Creation Checklist (TDD Adapted)\n\nIMPORTANT: Use TodoWrite to create todos for EACH checklist item below.\n\nRED Phase - Write Failing Test:\n\n Create pressure scenarios (3+ combined pressures for discipline skills)\n Run scenarios WITHOUT skill - document baseline behavior verbatim\n Identify patterns in rationalizations/failures\n\nGREEN Phase - Write Minimal Skill:\n\n Name uses only letters, numbers, hyphens (no parentheses/special chars)\n YAML frontmatter with only name and description (max 1024 chars)\n Description starts with \"Use when...\" and includes specific triggers/symptoms\n Description written in third person\n Keywords throughout for search (errors, symptoms, tools)\n Clear overview with core principle\n Address specific baseline failures identified in RED\n Code inline OR link to separate file\n One excellent example (not multi-language)\n Run scenarios WITH skill - verify agents now comply\n\nREFACTOR Phase - Close Loopholes:\n\n Identify NEW rationalizations from testing\n Add explicit counters (if discipline skill)\n Build rationalization table from all test iterations\n Create red flags list\n Re-test until bulletproof\n\nQuality Checks:\n\n Small flowchart only if decision non-obvious\n Quick reference table\n Common mistakes section\n No narrative storytelling\n Supporting files only for tools or heavy reference\n\nDeployment:\n\n Commit skill to git and push to your fork (if configured)\n Consider contributing back via PR (if broadly useful)\nDiscovery Workflow\n\nHow future Claude finds your skill:\n\nEncounters problem (\"tests are flaky\")\nFinds SKILL (description matches)\nScans overview (is this relevant?)\nReads patterns (quick reference table)\nLoads example (only when implementing)\n\nOptimize for this flow - put searchable terms early and often.\n\nThe Bottom Line\n\nCreating skills IS TDD for process documentation.\n\nSame Iron Law: No skill without failing test first. 
Same cycle: RED (baseline) → GREEN (write skill) → REFACTOR (close loopholes). Same benefits: Better quality, fewer surprises, bulletproof results.\n\nIf you follow TDD for code, follow it for skills. It's the same discipline applied to documentation."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/zlc000190/writing-skills",
    "publisherUrl": "https://clawhub.ai/zlc000190/writing-skills",
    "owner": "zlc000190",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/writing-skills",
    "downloadUrl": "https://openagent3.xyz/downloads/writing-skills",
    "agentUrl": "https://openagent3.xyz/skills/writing-skills/agent",
    "manifestUrl": "https://openagent3.xyz/skills/writing-skills/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/writing-skills/agent.md"
  }
}