{
  "schemaVersion": "1.0",
  "item": {
    "slug": "afrexai-prd-engine",
    "name": "PRD Engine",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/1kalin/afrexai-prd-engine",
    "canonicalUrl": "https://clawhub.ai/1kalin/afrexai-prd-engine",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/afrexai-prd-engine",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-prd-engine",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-prd-engine",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-prd-engine",
        "contentDisposition": "attachment; filename=\"afrexai-prd-engine-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/afrexai-prd-engine"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/afrexai-prd-engine",
    "agentPageUrl": "https://openagent3.xyz/skills/afrexai-prd-engine/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-prd-engine/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-prd-engine/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "PRD Engine — Product Requirements That Ship",
        "body": "Complete product requirements methodology: from idea to spec to shipped feature. Not just a JSON template — a full system for writing PRDs that developers actually follow and stakeholders actually approve."
      },
      {
        "title": "When to Use This Skill",
        "body": "Turning a vague idea into a buildable specification\nWriting PRDs for new features, products, or major refactors\nReviewing/improving existing PRDs before sprint planning\nBreaking epics into right-sized user stories\nCreating technical design documents alongside product specs\nPreparing specs for AI coding agents (Claude Code, Cursor, Copilot)"
      },
      {
        "title": "Phase 1: Discovery Brief",
        "body": "Before writing a single requirement, answer these questions. Skip this and you'll rewrite the PRD 3 times."
      },
      {
        "title": "Problem Validation Checklist",
        "body": "discovery_brief:\n  problem:\n    statement: \"\" # One sentence. If you need two, you don't understand it yet.\n    who_has_it: \"\" # Specific persona, not \"users\"\n    frequency: \"\" # Daily? Weekly? Once? (daily problems > occasional ones)\n    current_workaround: \"\" # What do they do today? (no workaround = maybe not a real problem)\n    evidence:\n      - type: \"\" # support_ticket | user_interview | analytics | churned_user | sales_objection\n        detail: \"\"\n        date: \"\"\n\n  impact:\n    users_affected: \"\" # Number or percentage\n    revenue_impact: \"\" # $ at risk or $ opportunity\n    strategic_alignment: \"\" # Which company goal does this serve?\n\n  constraints:\n    deadline: \"\" # Hard date or flexible?\n    budget: \"\" # Engineering weeks available\n    dependencies: \"\" # What must exist first?\n    regulatory: \"\" # Any compliance requirements?\n\n  success_metrics:\n    primary: \"\" # ONE metric that defines success\n    secondary: [] # 2-3 supporting metrics\n    measurement_method: \"\" # How will you actually measure this?\n    target: \"\" # Specific number, not \"improve\"\n    timeframe: \"\" # When do you expect to see results?"
      },
      {
        "title": "Problem Statement Formula",
        "body": "[Persona] needs [capability] because [reason], but currently [blocker], which causes [measurable impact].\n\nExamples:\n\n❌ \"Users need better onboarding\" (vague, unmeasurable)\n✅ \"New free-trial users (500/month) need to reach their first 'aha moment' within 10 minutes because 73% who don't will churn within 48 hours, but currently the average time-to-value is 34 minutes due to a 12-step setup wizard, which costs us ~$18K/month in lost conversions.\""
      },
      {
        "title": "Kill Criteria",
        "body": "Before proceeding, check these. If any are true, STOP and push back:\n\nSignalActionNo evidence of the problem (just someone's opinion)Demand evidence. Opinions aren't requirements.Solution already decided (\"just build X\")Rewind to the problem. Solutions without problems = features nobody uses.Success metric is unmeasurableDefine how you'll measure it or don't build it.Affects <1% of users with no revenue impactDeprioritize. Small problems with small impact = small returns.Scope keeps expanding during discoveryScope lock. If everything is in scope, nothing is."
      },
      {
        "title": "PRD Template",
        "body": "# PRD: [Feature Name]\n\n**Author:** [Name]\n**Status:** Draft | In Review | Approved | In Progress | Shipped\n**Created:** YYYY-MM-DD\n**Last Updated:** YYYY-MM-DD\n**Approvers:** [Names + roles]\n\n## 1. Problem & Opportunity\n\n[Problem statement from discovery brief — one paragraph max]\n\n### Evidence\n- [Evidence point 1 — with data]\n- [Evidence point 2 — with data]\n\n### Impact\n- Users affected: [number]\n- Revenue impact: [$ amount or % change]\n- Strategic goal: [which one]\n\n## 2. Solution Overview\n\n[2-3 paragraphs max. What are we building and why this approach?]\n\n### What This Is\n- [Bullet list of what's in scope]\n\n### What This Is NOT\n- [Explicit exclusions — this prevents scope creep]\n\n### Key Decisions Made\n| Decision | Options Considered | Chosen | Rationale |\n|----------|-------------------|--------|-----------|\n| [Decision 1] | A, B, C | B | [Why] |\n\n## 3. User Stories\n\n[See Phase 3 below for story writing methodology]\n\n## 4. Design & UX\n\n### User Flow\n1. User [action] →\n2. System [response] →\n3. User sees [outcome]\n\n### Wireframes/Mockups\n[Link to Figma/screenshots or describe key screens]\n\n### Edge Cases\n| Scenario | Expected Behavior |\n|----------|------------------|\n| [Edge case 1] | [What happens] |\n| [Edge case 2] | [What happens] |\n| Empty state | [What user sees with no data] |\n| Error state | [What user sees on failure] |\n| Slow connection | [Loading behavior] |\n\n## 5. Technical Considerations\n\n### Architecture Notes\n- [Key technical decisions]\n- [New services/APIs needed]\n- [Database changes]\n\n### Dependencies\n- [External service X]\n- [Team Y's API]\n- [Library Z]\n\n### Performance Requirements\n- Page load: <[X]ms\n- API response: <[X]ms\n- Concurrent users: [X]\n\n### Security & Privacy\n- [Data handling requirements]\n- [Auth/permissions needed]\n- [PII considerations]\n\n## 6. 
Release Plan\n\n### Rollout Strategy\n- [ ] Feature flag: [flag name]\n- [ ] Beta group: [who]\n- [ ] % rollout: [10% → 50% → 100%]\n- [ ] Rollback plan: [how]\n\n### Launch Checklist\n- [ ] QA sign-off\n- [ ] Analytics events implemented\n- [ ] Monitoring/alerts configured\n- [ ] Documentation updated\n- [ ] Support team briefed\n- [ ] Stakeholders notified\n\n## 7. Success Criteria\n\n| Metric | Current | Target | Timeframe |\n|--------|---------|--------|-----------|\n| [Primary metric] | [X] | [Y] | [Z weeks] |\n| [Secondary metric] | [X] | [Y] | [Z weeks] |\n\n### Post-Launch Review\n- **1-week check:** [What to look at]\n- **1-month review:** [What to measure]\n- **Kill/iterate decision:** [Criteria for each]"
      },
      {
        "title": "PRD Quality Rubric (score before sharing)",
        "body": "Dimension0-2 (Weak)3-4 (Adequate)5 (Strong)WeightProblem clarityVague, no dataClear but thin evidenceSharp statement + multiple evidence pointsx4Scope disciplineEverything in scopeSome boundariesExplicit in/out + \"what this is NOT\"x3Story qualityVague tasksStories with some criteriaINVEST stories + verifiable acceptance criteriax4Edge casesNone listedHappy path + 1-2 edgesComprehensive: empty, error, slow, permissions, concurrentx3Success metrics\"Improve X\"Metric + targetMetric + baseline + target + timeframe + measurement methodx3Technical feasibilityNo tech sectionArchitecture notesDependencies, performance, security, migration planx2Release planNone\"Ship it\"Feature flag + rollout % + rollback + launch checklistx1\n\nScoring: Sum (score × weight). Max = 100.\n\n80-100: Ship-ready. Get approvals.\n60-79: Solid but missing pieces. Fill gaps before review.\n40-59: Needs work. Major sections incomplete.\n<40: Start over or go back to discovery."
      },
      {
        "title": "Story Format",
        "body": "story:\n  id: \"US-001\"\n  title: \"\" # Action-oriented: \"Add priority field to tasks table\"\n  persona: \"\" # Who benefits\n  narrative: \"As a [persona], I want [capability] so that [benefit]\"\n  acceptance_criteria:\n    - criterion: \"\" # Verifiable statement\n      type: \"functional\" # functional | performance | security | ux\n  priority: 1 # Execution order (dependencies first)\n  size: \"\" # XS | S | M | L | XL\n  status: \"todo\" # todo | in_progress | review | done\n  notes: \"\" # Runtime observations\n  depends_on: [] # Story IDs this depends on\n  blocked_by: [] # External blockers"
      },
      {
        "title": "INVEST Checklist (every story must pass)",
        "body": "LetterCriterionTestI — IndependentCan be built without other incomplete storiesNo circular dependenciesN — NegotiableDetails can flex (the \"what\" is fixed, the \"how\" is flexible)Multiple implementation approaches existV — ValuableDelivers user or business value on its ownWould a user/stakeholder care if only this shipped?E — EstimableTeam can size itNo major unknowns (if unknowns exist, add a spike first)S — SmallCompletable in one sprint (or one context window for AI agents)1-3 days of work maxT — TestableHas verifiable acceptance criteriaCan write a test for each criterion"
      },
      {
        "title": "Acceptance Criteria Rules",
        "body": "Good criteria are:\n\nBinary (pass/fail, not subjective)\nSpecific (numbers, not adjectives)\nIndependent (testable in isolation)\n\n❌ Bad✅ Good\"Works correctly\"\"Returns 200 with JSON body containing id, name, status fields\"\"Fast enough\"\"API responds in <200ms at p95 with 100 concurrent users\"\"User-friendly\"\"Form shows inline validation errors within 100ms of field blur\"\"Secure\"\"Endpoint returns 403 for users without admin role\"\"Handles errors\"\"On network timeout, shows retry button + cached data if available\"\n\nAlways include these universal criteria:\n\nTypecheck passes (tsc --noEmit --strict) (for TypeScript projects)\nAll existing tests still pass\nNew functionality has test coverage"
      },
      {
        "title": "Story Sizing Guide",
        "body": "SizeScopeTimeExampleXSConfig change, copy update, env var<2 hours\"Update error message text\"SSingle component/function, no new deps2-4 hours\"Add date picker to form\"MFeature slice: DB + API + UI1-2 days\"Add task priority with filter\"LMulti-component feature, new patterns2-3 days\"Add real-time notifications\"XLToo big. Split it.——"
      },
      {
        "title": "Story Ordering: The Dependency Pyramid",
        "body": "Always order stories bottom-up:\n\nLevel 1: Schema & Data (migrations, models, seed data)\n    ↑\nLevel 2: Backend Logic (services, APIs, business rules)\n    ↑\nLevel 3: Integration (API routes, auth, middleware)\n    ↑\nLevel 4: UI Components (forms, tables, modals)\n    ↑\nLevel 5: UX Polish (animations, empty states, loading)\n    ↑\nLevel 6: Analytics & Monitoring (events, dashboards)\n\nEach level depends ONLY on levels below it. Never build UI before the API exists."
      },
      {
        "title": "Splitting Strategies",
        "body": "When a story is too big, split using one of these patterns:\n\nStrategyWhen to UseExampleBy layerFull-stack feature\"Add schema\" → \"Add API\" → \"Add UI\"By operationCRUD feature\"Create task\" → \"Read/list tasks\" → \"Update task\" → \"Delete task\"By personaMulti-role feature\"Admin creates template\" → \"User fills template\" → \"Viewer sees results\"By happy/sad pathComplex flows\"Successful payment\" → \"Payment declined\" → \"Payment timeout\"By platformCross-platform\"iOS support\" → \"Android support\" → \"Web support\"Spike + implementHigh uncertainty\"Spike: evaluate auth libraries (2h)\" → \"Implement auth with chosen library\""
      },
      {
        "title": "Phase 4: PRD for AI Coding Agents",
        "body": "When the PRD will be executed by AI agents (Claude Code, Cursor, Copilot Workspace, etc.), add these adaptations:"
      },
      {
        "title": "Agent-Optimized Story Format",
        "body": "agent_story:\n  id: \"US-001\"\n  title: \"Add priority field to tasks table\"\n  context: |\n    The tasks table is in src/db/schema.ts using Drizzle ORM.\n    Priority values should be: high, medium, low (default: medium).\n    See existing fields for naming conventions.\n  acceptance_criteria:\n    - \"Add `priority` column to `tasks` table in src/db/schema.ts\"\n    - \"Type: enum('high', 'medium', 'low'), default 'medium', not null\"\n    - \"Generate migration: `npx drizzle-kit generate`\"\n    - \"Run migration: `npx drizzle-kit push`\"\n    - \"Verify: `tsc --noEmit --strict` passes\"\n    - \"Verify: existing tests pass (`npm test`)\"\n  files_to_touch:\n    - src/db/schema.ts\n    - drizzle/ (generated migration)\n  commands_to_run:\n    - \"npx drizzle-kit generate\"\n    - \"npx drizzle-kit push\"\n    - \"tsc --noEmit --strict\"\n    - \"npm test\"\n  done_when: \"All verify commands pass with exit code 0\""
      },
      {
        "title": "Agent-Specific Rules",
        "body": "Be explicit about file paths. Agents can't guess your project structure.\nInclude verification commands. Agents need a \"definition of done\" they can check.\nOne context window per story. If a story needs the agent to remember more than ~50 files, it's too big.\nList files to touch. Reduces agent exploration time and prevents hallucination.\nOrder matters even more. Agents execute sequentially — wrong order = compounding errors.\nInclude the commands. Don't say \"run the migration\" — say npx drizzle-kit push."
      },
      {
        "title": "Project Context File",
        "body": "For AI agent execution, create a PROJECT_CONTEXT.md alongside the PRD:\n\n# Project Context\n\n## Stack\n- Framework: [Next.js 14 / Express / etc.]\n- Language: [TypeScript strict mode]\n- Database: [PostgreSQL via Drizzle ORM]\n- Testing: [Vitest + Testing Library]\n- Styling: [Tailwind CSS]\n\n## Key Directories\n- src/db/ — Database schema and migrations\n- src/api/ — API routes\n- src/components/ — React components\n- src/lib/ — Shared utilities\n- tests/ — Test files (mirror src/ structure)\n\n## Conventions\n- File naming: kebab-case\n- Component naming: PascalCase\n- Max file length: 300 lines\n- Max function length: 50 lines\n- All exports typed, no `any`\n\n## Commands\n- `npm run dev` — Start dev server\n- `npm test` — Run tests\n- `npm run build` — Production build\n- `tsc --noEmit --strict` — Type check\n- `npx drizzle-kit generate` — Generate migration\n- `npx drizzle-kit push` — Apply migration\n\n## Current State\n- [What exists today relevant to the PRD]\n- [Any tech debt or gotchas the agent should know]"
      },
      {
        "title": "Review Checklist (before sharing the PRD)",
        "body": "Completeness:\n\nProblem statement has evidence (not just opinion)\n \"What this is NOT\" section exists and is specific\n Every story has ≥3 acceptance criteria\n Edge cases table covers: empty state, error state, permissions, concurrent access\n Success metrics have baseline + target + timeframe\n Technical section addresses: performance, security, dependencies\n\nQuality:\n\nNo story larger than \"L\" (split XL stories)\n All acceptance criteria are binary (pass/fail)\n No circular dependencies between stories\n Dependency pyramid ordering is correct\n Release plan includes rollback strategy\n\nReadability:\n\nExecutive summary is <3 sentences\n Non-engineers can understand the problem section\n Engineers can start building from the stories section alone\n No jargon without definition"
      },
      {
        "title": "Approval Flow",
        "body": "Author writes PRD\n    ↓\nSelf-review (score with rubric — must be ≥60)\n    ↓\nPeer review (another PM or tech lead)\n    ↓\nEngineering review (feasibility + sizing)\n    ↓\nStakeholder approval (PM lead or product director)\n    ↓\nStatus → Approved\n    ↓\nSprint planning (stories → backlog)"
      },
      {
        "title": "Common Review Feedback (and how to fix it)",
        "body": "FeedbackFix\"What problem does this solve?\"Your problem statement is weak. Add evidence.\"This is too big\"Split into phases. Ship the smallest valuable slice first (MVP).\"How do we know it worked?\"Your success metrics are vague. Add numbers + timeframe.\"What about [edge case]?\"Your edge case table is incomplete. Add it.\"When does this ship?\"Add timeline with milestones, not just a deadline.\"Who approved this?\"Add approvers field and get explicit sign-offs."
      },
      {
        "title": "PRD Status Lifecycle",
        "body": "Draft → In Review → Approved → In Progress → Shipped → Post-Launch Review\n                ↓                                              ↓\n            Rejected                                    Iterate / Kill"
      },
      {
        "title": "Progress Tracking",
        "body": "Track story completion in the PRD itself or a linked tracker:\n\nprogress:\n  total_stories: 12\n  done: 7\n  in_progress: 2\n  blocked: 1\n  todo: 2\n  completion: \"58%\"\n  \n  blocked_items:\n    - story: \"US-008\"\n      blocker: \"Waiting for payments API access from finance team\"\n      since: \"2025-01-15\"\n      escalation: \"Pinged finance lead, follow up Friday\"\n\n  velocity:\n    stories_per_week: 3.5\n    estimated_completion: \"2025-02-01\""
      },
      {
        "title": "Post-Launch Review Template",
        "body": "post_launch:\n  shipped_date: \"\"\n  review_date: \"\" # 2-4 weeks after ship\n\n  metrics:\n    primary:\n      metric: \"\"\n      baseline: \"\"\n      target: \"\"\n      actual: \"\"\n      verdict: \"\" # hit | miss | exceeded\n\n    secondary:\n      - metric: \"\"\n        actual: \"\"\n        verdict: \"\"\n\n  qualitative:\n    user_feedback: []\n    support_tickets: \"\" # count related to this feature\n    unexpected_outcomes: []\n\n  process_retro:\n    what_went_well: []\n    what_didnt: []\n    estimation_accuracy: \"\" # actual vs estimated effort\n    scope_changes: \"\" # what changed after approval\n\n  decision: \"\" # iterate | maintain | deprecate | expand\n  next_actions: []"
      },
      {
        "title": "Quick Commands",
        "body": "CommandWhat It Does\"Write a PRD for [feature]\"Full PRD from discovery through stories\"Break this into stories\"Takes a feature description → user stories\"Review this PRD\"Scores against quality rubric + gives specific feedback\"Make this agent-ready\"Converts PRD stories to agent-optimized format\"What's missing from this PRD?\"Gap analysis against the template\"Split this story\"Takes a large story → smaller INVEST-compliant stories\"Score this PRD\"Quality rubric scoring with dimension breakdown\"Create project context for [project]\"Generates PROJECT_CONTEXT.md for AI agent execution\"Post-launch review for [feature]\"Generates review template with metrics\"Track progress\"Updates completion stats from story statuses"
      }
    ],
    "body": "PRD Engine — Product Requirements That Ship\n\nComplete product requirements methodology: from idea to spec to shipped feature. Not just a JSON template — a full system for writing PRDs that developers actually follow and stakeholders actually approve.\n\nWhen to Use This Skill\nTurning a vague idea into a buildable specification\nWriting PRDs for new features, products, or major refactors\nReviewing/improving existing PRDs before sprint planning\nBreaking epics into right-sized user stories\nCreating technical design documents alongside product specs\nPreparing specs for AI coding agents (Claude Code, Cursor, Copilot)\nPhase 1: Discovery Brief\n\nBefore writing a single requirement, answer these questions. Skip this and you'll rewrite the PRD 3 times.\n\nProblem Validation Checklist\ndiscovery_brief:\n  problem:\n    statement: \"\" # One sentence. If you need two, you don't understand it yet.\n    who_has_it: \"\" # Specific persona, not \"users\"\n    frequency: \"\" # Daily? Weekly? Once? (daily problems > occasional ones)\n    current_workaround: \"\" # What do they do today? 
(no workaround = maybe not a real problem)\n    evidence:\n      - type: \"\" # support_ticket | user_interview | analytics | churned_user | sales_objection\n        detail: \"\"\n        date: \"\"\n\n  impact:\n    users_affected: \"\" # Number or percentage\n    revenue_impact: \"\" # $ at risk or $ opportunity\n    strategic_alignment: \"\" # Which company goal does this serve?\n\n  constraints:\n    deadline: \"\" # Hard date or flexible?\n    budget: \"\" # Engineering weeks available\n    dependencies: \"\" # What must exist first?\n    regulatory: \"\" # Any compliance requirements?\n\n  success_metrics:\n    primary: \"\" # ONE metric that defines success\n    secondary: [] # 2-3 supporting metrics\n    measurement_method: \"\" # How will you actually measure this?\n    target: \"\" # Specific number, not \"improve\"\n    timeframe: \"\" # When do you expect to see results?\n\nProblem Statement Formula\n\n[Persona] needs [capability] because [reason], but currently [blocker], which causes [measurable impact].\n\nExamples:\n\n❌ \"Users need better onboarding\" (vague, unmeasurable)\n✅ \"New free-trial users (500/month) need to reach their first 'aha moment' within 10 minutes because 73% who don't will churn within 48 hours, but currently the average time-to-value is 34 minutes due to a 12-step setup wizard, which costs us ~$18K/month in lost conversions.\"\nKill Criteria\n\nBefore proceeding, check these. If any are true, STOP and push back:\n\nSignal\tAction\nNo evidence of the problem (just someone's opinion)\tDemand evidence. Opinions aren't requirements.\nSolution already decided (\"just build X\")\tRewind to the problem. Solutions without problems = features nobody uses.\nSuccess metric is unmeasurable\tDefine how you'll measure it or don't build it.\nAffects <1% of users with no revenue impact\tDeprioritize. Small problems with small impact = small returns.\nScope keeps expanding during discovery\tScope lock. 
If everything is in scope, nothing is.\nPhase 2: PRD Document\nPRD Template\n# PRD: [Feature Name]\n\n**Author:** [Name]\n**Status:** Draft | In Review | Approved | In Progress | Shipped\n**Created:** YYYY-MM-DD\n**Last Updated:** YYYY-MM-DD\n**Approvers:** [Names + roles]\n\n## 1. Problem & Opportunity\n\n[Problem statement from discovery brief — one paragraph max]\n\n### Evidence\n- [Evidence point 1 — with data]\n- [Evidence point 2 — with data]\n\n### Impact\n- Users affected: [number]\n- Revenue impact: [$ amount or % change]\n- Strategic goal: [which one]\n\n## 2. Solution Overview\n\n[2-3 paragraphs max. What are we building and why this approach?]\n\n### What This Is\n- [Bullet list of what's in scope]\n\n### What This Is NOT\n- [Explicit exclusions — this prevents scope creep]\n\n### Key Decisions Made\n| Decision | Options Considered | Chosen | Rationale |\n|----------|-------------------|--------|-----------|\n| [Decision 1] | A, B, C | B | [Why] |\n\n## 3. User Stories\n\n[See Phase 3 below for story writing methodology]\n\n## 4. Design & UX\n\n### User Flow\n1. User [action] →\n2. System [response] →\n3. User sees [outcome]\n\n### Wireframes/Mockups\n[Link to Figma/screenshots or describe key screens]\n\n### Edge Cases\n| Scenario | Expected Behavior |\n|----------|------------------|\n| [Edge case 1] | [What happens] |\n| [Edge case 2] | [What happens] |\n| Empty state | [What user sees with no data] |\n| Error state | [What user sees on failure] |\n| Slow connection | [Loading behavior] |\n\n## 5. Technical Considerations\n\n### Architecture Notes\n- [Key technical decisions]\n- [New services/APIs needed]\n- [Database changes]\n\n### Dependencies\n- [External service X]\n- [Team Y's API]\n- [Library Z]\n\n### Performance Requirements\n- Page load: <[X]ms\n- API response: <[X]ms\n- Concurrent users: [X]\n\n### Security & Privacy\n- [Data handling requirements]\n- [Auth/permissions needed]\n- [PII considerations]\n\n## 6. 
Release Plan\n\n### Rollout Strategy\n- [ ] Feature flag: [flag name]\n- [ ] Beta group: [who]\n- [ ] % rollout: [10% → 50% → 100%]\n- [ ] Rollback plan: [how]\n\n### Launch Checklist\n- [ ] QA sign-off\n- [ ] Analytics events implemented\n- [ ] Monitoring/alerts configured\n- [ ] Documentation updated\n- [ ] Support team briefed\n- [ ] Stakeholders notified\n\n## 7. Success Criteria\n\n| Metric | Current | Target | Timeframe |\n|--------|---------|--------|-----------|\n| [Primary metric] | [X] | [Y] | [Z weeks] |\n| [Secondary metric] | [X] | [Y] | [Z weeks] |\n\n### Post-Launch Review\n- **1-week check:** [What to look at]\n- **1-month review:** [What to measure]\n- **Kill/iterate decision:** [Criteria for each]\n\nPRD Quality Rubric (score before sharing)\nDimension\t0-2 (Weak)\t3-4 (Adequate)\t5 (Strong)\tWeight\nProblem clarity\tVague, no data\tClear but thin evidence\tSharp statement + multiple evidence points\tx4\nScope discipline\tEverything in scope\tSome boundaries\tExplicit in/out + \"what this is NOT\"\tx3\nStory quality\tVague tasks\tStories with some criteria\tINVEST stories + verifiable acceptance criteria\tx4\nEdge cases\tNone listed\tHappy path + 1-2 edges\tComprehensive: empty, error, slow, permissions, concurrent\tx3\nSuccess metrics\t\"Improve X\"\tMetric + target\tMetric + baseline + target + timeframe + measurement method\tx3\nTechnical feasibility\tNo tech section\tArchitecture notes\tDependencies, performance, security, migration plan\tx2\nRelease plan\tNone\t\"Ship it\"\tFeature flag + rollout % + rollback + launch checklist\tx1\n\nScoring: Sum (score × weight). Max = 100.\n\n80-100: Ship-ready. Get approvals.\n60-79: Solid but missing pieces. Fill gaps before review.\n40-59: Needs work. 
Major sections incomplete.\n<40: Start over or go back to discovery.\nPhase 3: User Story Methodology\nStory Format\nstory:\n  id: \"US-001\"\n  title: \"\" # Action-oriented: \"Add priority field to tasks table\"\n  persona: \"\" # Who benefits\n  narrative: \"As a [persona], I want [capability] so that [benefit]\"\n  acceptance_criteria:\n    - criterion: \"\" # Verifiable statement\n      type: \"functional\" # functional | performance | security | ux\n  priority: 1 # Execution order (dependencies first)\n  size: \"\" # XS | S | M | L | XL\n  status: \"todo\" # todo | in_progress | review | done\n  notes: \"\" # Runtime observations\n  depends_on: [] # Story IDs this depends on\n  blocked_by: [] # External blockers\n\nINVEST Checklist (every story must pass)\nLetter\tCriterion\tTest\nI — Independent\tCan be built without other incomplete stories\tNo circular dependencies\nN — Negotiable\tDetails can flex (the \"what\" is fixed, the \"how\" is flexible)\tMultiple implementation approaches exist\nV — Valuable\tDelivers user or business value on its own\tWould a user/stakeholder care if only this shipped?\nE — Estimable\tTeam can size it\tNo major unknowns (if unknowns exist, add a spike first)\nS — Small\tCompletable in one sprint (or one context window for AI agents)\t1-3 days of work max\nT — Testable\tHas verifiable acceptance criteria\tCan write a test for each criterion\nAcceptance Criteria Rules\n\nGood criteria are:\n\nBinary (pass/fail, not subjective)\nSpecific (numbers, not adjectives)\nIndependent (testable in isolation)\n❌ Bad\t✅ Good\n\"Works correctly\"\t\"Returns 200 with JSON body containing id, name, status fields\"\n\"Fast enough\"\t\"API responds in <200ms at p95 with 100 concurrent users\"\n\"User-friendly\"\t\"Form shows inline validation errors within 100ms of field blur\"\n\"Secure\"\t\"Endpoint returns 403 for users without admin role\"\n\"Handles errors\"\t\"On network timeout, shows retry button + cached data if available\"\n\nAlways 
include these universal criteria:\n\nTypecheck passes (tsc --noEmit --strict) (for TypeScript projects)\nAll existing tests still pass\nNew functionality has test coverage\nStory Sizing Guide\nSize\tScope\tTime\tExample\nXS\tConfig change, copy update, env var\t<2 hours\t\"Update error message text\"\nS\tSingle component/function, no new deps\t2-4 hours\t\"Add date picker to form\"\nM\tFeature slice: DB + API + UI\t1-2 days\t\"Add task priority with filter\"\nL\tMulti-component feature, new patterns\t2-3 days\t\"Add real-time notifications\"\nXL\tToo big. Split it.\t—\t—\nStory Ordering: The Dependency Pyramid\n\nAlways order stories bottom-up:\n\nLevel 1: Schema & Data (migrations, models, seed data)\n    ↑\nLevel 2: Backend Logic (services, APIs, business rules)\n    ↑\nLevel 3: Integration (API routes, auth, middleware)\n    ↑\nLevel 4: UI Components (forms, tables, modals)\n    ↑\nLevel 5: UX Polish (animations, empty states, loading)\n    ↑\nLevel 6: Analytics & Monitoring (events, dashboards)\n\n\nEach level depends ONLY on levels below it. 
Never build UI before the API exists.\n\nSplitting Strategies\n\nWhen a story is too big, split using one of these patterns:\n\nStrategy\tWhen to Use\tExample\nBy layer\tFull-stack feature\t\"Add schema\" → \"Add API\" → \"Add UI\"\nBy operation\tCRUD feature\t\"Create task\" → \"Read/list tasks\" → \"Update task\" → \"Delete task\"\nBy persona\tMulti-role feature\t\"Admin creates template\" → \"User fills template\" → \"Viewer sees results\"\nBy happy/sad path\tComplex flows\t\"Successful payment\" → \"Payment declined\" → \"Payment timeout\"\nBy platform\tCross-platform\t\"iOS support\" → \"Android support\" → \"Web support\"\nSpike + implement\tHigh uncertainty\t\"Spike: evaluate auth libraries (2h)\" → \"Implement auth with chosen library\"\nPhase 4: PRD for AI Coding Agents\n\nWhen the PRD will be executed by AI agents (Claude Code, Cursor, Copilot Workspace, etc.), add these adaptations:\n\nAgent-Optimized Story Format\nagent_story:\n  id: \"US-001\"\n  title: \"Add priority field to tasks table\"\n  context: |\n    The tasks table is in src/db/schema.ts using Drizzle ORM.\n    Priority values should be: high, medium, low (default: medium).\n    See existing fields for naming conventions.\n  acceptance_criteria:\n    - \"Add `priority` column to `tasks` table in src/db/schema.ts\"\n    - \"Type: enum('high', 'medium', 'low'), default 'medium', not null\"\n    - \"Generate migration: `npx drizzle-kit generate`\"\n    - \"Run migration: `npx drizzle-kit push`\"\n    - \"Verify: `tsc --noEmit --strict` passes\"\n    - \"Verify: existing tests pass (`npm test`)\"\n  files_to_touch:\n    - src/db/schema.ts\n    - drizzle/ (generated migration)\n  commands_to_run:\n    - \"npx drizzle-kit generate\"\n    - \"npx drizzle-kit push\"\n    - \"tsc --noEmit --strict\"\n    - \"npm test\"\n  done_when: \"All verify commands pass with exit code 0\"\n\nAgent-Specific Rules\nBe explicit about file paths. 
Agents can't guess your project structure.\nInclude verification commands. Agents need a \"definition of done\" they can check.\nOne context window per story. If a story needs the agent to remember more than ~50 files, it's too big.\nList files to touch. Reduces agent exploration time and prevents hallucination.\nOrder matters even more. Agents execute sequentially — wrong order = compounding errors.\nInclude the commands. Don't say \"run the migration\" — say npx drizzle-kit push.\nProject Context File\n\nFor AI agent execution, create a PROJECT_CONTEXT.md alongside the PRD:\n\n# Project Context\n\n## Stack\n- Framework: [Next.js 14 / Express / etc.]\n- Language: [TypeScript strict mode]\n- Database: [PostgreSQL via Drizzle ORM]\n- Testing: [Vitest + Testing Library]\n- Styling: [Tailwind CSS]\n\n## Key Directories\n- src/db/ — Database schema and migrations\n- src/api/ — API routes\n- src/components/ — React components\n- src/lib/ — Shared utilities\n- tests/ — Test files (mirror src/ structure)\n\n## Conventions\n- File naming: kebab-case\n- Component naming: PascalCase\n- Max file length: 300 lines\n- Max function length: 50 lines\n- All exports typed, no `any`\n\n## Commands\n- `npm run dev` — Start dev server\n- `npm test` — Run tests\n- `npm run build` — Production build\n- `tsc --noEmit --strict` — Type check\n- `npx drizzle-kit generate` — Generate migration\n- `npx drizzle-kit push` — Apply migration\n\n## Current State\n- [What exists today relevant to the PRD]\n- [Any tech debt or gotchas the agent should know]\n\nPhase 5: Review & Approval\nReview Checklist (before sharing the PRD)\n\nCompleteness:\n\n Problem statement has evidence (not just opinion)\n \"What this is NOT\" section exists and is specific\n Every story has ≥3 acceptance criteria\n Edge cases table covers: empty state, error state, permissions, concurrent access\n Success metrics have baseline + target + timeframe\n Technical section addresses: performance, security, 
dependencies\n\nQuality:\n\n No story larger than \"L\" (split XL stories)\n All acceptance criteria are binary (pass/fail)\n No circular dependencies between stories\n Dependency pyramid ordering is correct\n Release plan includes rollback strategy\n\nReadability:\n\n Executive summary is <3 sentences\n Non-engineers can understand the problem section\n Engineers can start building from the stories section alone\n No jargon without definition\nApproval Flow\nAuthor writes PRD\n    ↓\nSelf-review (score with rubric — must be ≥60)\n    ↓\nPeer review (another PM or tech lead)\n    ↓\nEngineering review (feasibility + sizing)\n    ↓\nStakeholder approval (PM lead or product director)\n    ↓\nStatus → Approved\n    ↓\nSprint planning (stories → backlog)\n\nCommon Review Feedback (and how to fix it)\nFeedback\tFix\n\"What problem does this solve?\"\tYour problem statement is weak. Add evidence.\n\"This is too big\"\tSplit into phases. Ship the smallest valuable slice first (MVP).\n\"How do we know it worked?\"\tYour success metrics are vague. Add numbers + timeframe.\n\"What about [edge case]?\"\tYour edge case table is incomplete. 
Add it.\n\"When does this ship?\"\tAdd timeline with milestones, not just a deadline.\n\"Who approved this?\"\tAdd approvers field and get explicit sign-offs.\nPhase 6: Tracking & Iteration\nPRD Status Lifecycle\nDraft → In Review → Approved → In Progress → Shipped → Post-Launch Review\n                ↓                                              ↓\n            Rejected                                    Iterate / Kill\n\nProgress Tracking\n\nTrack story completion in the PRD itself or a linked tracker:\n\nprogress:\n  total_stories: 12\n  done: 7\n  in_progress: 2\n  blocked: 1\n  todo: 2\n  completion: \"58%\"\n  \n  blocked_items:\n    - story: \"US-008\"\n      blocker: \"Waiting for payments API access from finance team\"\n      since: \"2025-01-15\"\n      escalation: \"Pinged finance lead, follow up Friday\"\n\n  velocity:\n    stories_per_week: 3.5\n    estimated_completion: \"2025-02-01\"\n\nPost-Launch Review Template\npost_launch:\n  shipped_date: \"\"\n  review_date: \"\" # 2-4 weeks after ship\n\n  metrics:\n    primary:\n      metric: \"\"\n      baseline: \"\"\n      target: \"\"\n      actual: \"\"\n      verdict: \"\" # hit | miss | exceeded\n\n    secondary:\n      - metric: \"\"\n        actual: \"\"\n        verdict: \"\"\n\n  qualitative:\n    user_feedback: []\n    support_tickets: \"\" # count related to this feature\n    unexpected_outcomes: []\n\n  process_retro:\n    what_went_well: []\n    what_didnt: []\n    estimation_accuracy: \"\" # actual vs estimated effort\n    scope_changes: \"\" # what changed after approval\n\n  decision: \"\" # iterate | maintain | deprecate | expand\n  next_actions: []\n\nQuick Commands\nCommand\tWhat It Does\n\"Write a PRD for [feature]\"\tFull PRD from discovery through stories\n\"Break this into stories\"\tTakes a feature description → user stories\n\"Review this PRD\"\tScores against quality rubric + gives specific feedback\n\"Make this agent-ready\"\tConverts PRD stories to agent-optimized 
format\n\"What's missing from this PRD?\"\tGap analysis against the template\n\"Split this story\"\tTakes a large story → smaller INVEST-compliant stories\n\"Score this PRD\"\tQuality rubric scoring with dimension breakdown\n\"Create project context for [project]\"\tGenerates PROJECT_CONTEXT.md for AI agent execution\n\"Post-launch review for [feature]\"\tGenerates review template with metrics\n\"Track progress\"\tUpdates completion stats from story statuses"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/1kalin/afrexai-prd-engine",
    "publisherUrl": "https://clawhub.ai/1kalin/afrexai-prd-engine",
    "owner": "1kalin",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/afrexai-prd-engine",
    "downloadUrl": "https://openagent3.xyz/downloads/afrexai-prd-engine",
    "agentUrl": "https://openagent3.xyz/skills/afrexai-prd-engine/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-prd-engine/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-prd-engine/agent.md"
  }
}