{
  "schemaVersion": "1.0",
  "item": {
    "slug": "afrexai-vibe-coding",
    "name": "Vibe Coding Mastery",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/1kalin/afrexai-vibe-coding",
    "canonicalUrl": "https://clawhub.ai/1kalin/afrexai-vibe-coding",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/afrexai-vibe-coding",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-vibe-coding",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-07T17:22:31.273Z",
      "expiresAt": "2026-05-14T17:22:31.273Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-vibe-coding",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-vibe-coding",
        "contentDisposition": "attachment; filename=\"afrexai-vibe-coding-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/afrexai-vibe-coding"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/afrexai-vibe-coding",
    "agentPageUrl": "https://openagent3.xyz/skills/afrexai-vibe-coding/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-vibe-coding/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-vibe-coding/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Vibe Coding Mastery",
        "body": "The complete system for building software with AI — from zero to production. Not tips. Not theory. A full operating methodology.\n\nWhat is vibe coding? Programming where you describe what you want and let AI generate code. You evaluate by results, not by reading every line. Coined by Andrej Karpathy (Feb 2025).\n\nKey distinction (Simon Willison): If you review, test, and explain the code — that's AI-assisted software development. Vibe coding means accepting AI output without fully understanding every function. This skill covers both modes and the spectrum between them."
      },
      {
        "title": "Phase 1: When to Vibe (Decision Matrix)",
        "body": "Before starting, classify your project:\n\nFactorVibe ✅Don't Vibe ❌StakesLow (prototype, internal, learning)High (payments, auth, compliance)TimelineHours to daysMonths+Team sizeSolo or pairLarge team with standardsDomain knowledgeYou understand the domainUnfamiliar territoryReversibilityEasy to rewriteHard to change laterData sensitivityPublic/test dataPII, financial, health\n\nScoring: Count ✅ checks.\n\n5-6: Full vibe mode. Ship fast.\n3-4: Vibe with guardrails. Review critical paths.\n1-2: AI-assisted development, not vibe coding. Review everything.\n0: Write it yourself or hire someone who understands the domain."
      },
      {
        "title": "Vibe Coding Maturity Levels",
        "body": "LevelDescriptionWhoL1 — PassengerCopy-paste AI output, hope it worksBeginnersL2 — NavigatorGuide AI with context, catch obvious errorsIntermediateL3 — PilotArchitecture decisions, AI implements, you reviewExperienced devsL4 — ConductorOrchestrate multiple AI sessions, parallel streamsPower users\n\nTarget: L3 minimum for anything going to production."
      },
      {
        "title": "Primary Tools Matrix",
        "body": "ToolBest ForContext WindowMulti-fileTerminalCostClaude CodeFull-stack, complex refactors, CLI200KExcellentNativeAPI usageCursorEditor-integrated, rapid iteration128KGoodVia terminal$20/mo + APIWindsurfBeginner-friendly, guided flows128KGoodLimited$10/mo + APIGitHub CopilotInline completions, small edits8-32KLimitedNo$10-19/moAiderGit-aware, open source, CLIVariesGoodNativeAPI onlyCline (VS Code)VS Code native, plan modeVariesGoodVia terminalAPI only"
      },
      {
        "title": "Multi-Tool Strategy",
        "body": "Use tools in combination:\n\nArchitecture & planning → Claude Code or Claude chat (largest context, best reasoning)\nImplementation → Cursor or Claude Code (fast iteration, multi-file edits)\nQuick fixes & completions → Copilot (inline, zero friction)\nCode review → Claude chat (paste diffs, get thorough review)"
      },
      {
        "title": "Phase 3: Rules Files (Your Persistent Context)",
        "body": "Rules files teach AI your conventions once. Without them, every session starts from zero."
      },
      {
        "title": "Universal Rules File Template",
        "body": "# Project Rules\n\n## Stack\n- Language: [TypeScript/Python/Go/etc.]\n- Framework: [Next.js/FastAPI/etc.]\n- Database: [PostgreSQL/SQLite/etc.]\n- Styling: [Tailwind/CSS Modules/etc.]\n- Package manager: [pnpm/npm/poetry/etc.]\n\n## Code Style\n- Max function length: 50 lines\n- Max file length: 300 lines\n- One export per file (prefer)\n- Use [const/let, never var] / [type hints always]\n- Error handling: [explicit try/catch, never swallow errors]\n- Naming: [camelCase functions, PascalCase components, UPPER_SNAKE constants]\n\n## Architecture\n- File structure: [describe or reference]\n- API pattern: [REST/tRPC/GraphQL]\n- State management: [Zustand/Redux/signals/etc.]\n- Auth pattern: [JWT/session/OAuth provider]\n\n## Testing\n- Framework: [Vitest/Jest/Pytest/etc.]\n- Minimum coverage: [80%/90%/etc.]\n- Test file location: [co-located/__tests__/tests/]\n- Run before committing: [command]\n\n## Do NOT\n- Do not use `any` type in TypeScript\n- Do not install new dependencies without asking\n- Do not modify database schema without migration\n- Do not hardcode secrets, URLs, or config values\n- Do not remove existing tests\n\n## When Unsure\n- Ask before making architectural decisions\n- Show the plan before implementing changes >100 lines\n- Flag security-adjacent code for manual review"
      },
      {
        "title": "Where to Put It",
        "body": "ToolFileNotesClaude CodeCLAUDE.md in repo rootAlso reads .claude/ directoryCursor.cursor/rules/*.mdcSupports conditional rules with globsWindsurf.windsurfrules in repo rootSingle fileAider.aider.conf.yml + conventions in chatYAML config + initial promptGenericAGENTS.md or CONVENTIONS.mdAny tool can be told to read it"
      },
      {
        "title": "Cursor Conditional Rules (.mdc)",
        "body": "---\ndescription: React component standards\nglobs: src/components/**/*.tsx\nalwaysApply: false\n---\n\n# Component Rules\n- Functional components only (no class components)\n- Props interface above component, named [Component]Props\n- Use forwardRef for components that accept ref\n- Co-locate styles in [component].module.css\n- Co-locate tests in [component].test.tsx\n- Export component as named export, not default"
      },
      {
        "title": "Rules File Quality Checklist",
        "body": "Stack and versions specified\n File/function size limits defined\n Naming conventions documented\n \"Do NOT\" section with common AI mistakes\n Testing expectations clear\n Architecture patterns described or referenced\n Security-sensitive areas flagged\n Dependencies policy stated"
      },
      {
        "title": "The 5-Level Prompt Quality Hierarchy",
        "body": "Level 1 — Wish (bad)\n\n\"Build a todo app\"\n\nLevel 2 — Request (okay)\n\n\"Build a todo app with React and Tailwind\"\n\nLevel 3 — Specification (good)\n\n\"Build a todo app: React 18, TypeScript, Tailwind. Features: add/edit/delete/toggle todos. Store in localStorage. Responsive. Under 200 lines total.\"\n\nLevel 4 — Brief (great)\n\n\"Build a todo app. Here's the spec:\n\nStack: React 18 + TS + Tailwind + Vite\nFeatures: CRUD todos, toggle complete, filter (all/active/done), persist to localStorage\nConstraints: Single component file, under 200 lines, no external deps beyond stack\nDone when: All features work, page refresh preserves state, mobile responsive\nStart with the data types, then build up.\"\n\nLevel 5 — Contract (production-grade)\n\ntask: Todo application\nstack:\n  runtime: React 18 + TypeScript strict\n  styling: Tailwind CSS 3.x\n  build: Vite 5\n  test: Vitest + Testing Library\nfeatures:\n  - CRUD operations on todos\n  - Toggle completion status\n  - Filter: all | active | completed\n  - Bulk actions: complete all, clear completed\n  - Persist to localStorage with versioned schema\nconstraints:\n  - Max 3 component files\n  - Max 200 lines per file\n  - No external state management library\n  - Keyboard accessible (tab, enter, escape)\n  - Mobile responsive (min 320px)\nacceptance:\n  - All features functional\n  - Page refresh preserves state\n  - 90%+ test coverage\n  - No TypeScript errors (strict mode)\n  - Lighthouse accessibility score > 90\napproach: Start with types/interfaces, then hooks, then components, then tests."
      },
      {
        "title": "12 Proven Prompt Patterns",
        "body": "Scaffolding: \"Create the project structure with empty files and type definitions. Don't implement yet.\"\nIncremental: \"Implement only [specific function]. Don't touch other files.\"\nExplain-then-build: \"Explain how you'd architect this, then implement after I approve.\"\nTest-first: \"Write the tests first based on these requirements. Then implement to make them pass.\"\nRefactor: \"Refactor [file] to [goal]. Keep the same behavior. Don't add features.\"\nDebug: [paste error] \"This happens when [action]. Expected [behavior]. The relevant code is in [files].\"\nReview: \"Review this code for [security/performance/readability]. Be specific about issues and fixes.\"\nMigrate: \"Convert this from [old pattern] to [new pattern]. Show me the plan first.\"\nDocument: \"Add JSDoc/docstrings to all public functions in [file]. Include param types and examples.\"\nOptimize: \"This function is slow with >10K items. Profile, identify bottleneck, optimize. Keep same API.\"\nParallel session: \"Read [these files] and summarize the architecture. Don't change anything.\"\nRecovery: \"The codebase is in a broken state. [describe symptoms]. Help me understand what went wrong before we fix it.\""
      },
      {
        "title": "Anti-Patterns (What NOT to Prompt)",
        "body": "Anti-PatternWhy It FailsFix\"Build me an app\"Too vague, AI guesses everythingUse Level 4+ prompts\"Fix it\" (no context)AI doesn't know what \"it\" isPaste error + expected behavior\"Rewrite everything\"Nukes working code, introduces regressionsIncremental refactors\"Make it better\"Subjective, AI changes random thingsSpecify what \"better\" means\"Use best practices\"AI's \"best practices\" may not match your stackSpecify the practices you wantMultiple unrelated asksContext bleed, partial implementationsOne task per promptLong conversation chainsContext degrades after 10+ turnsStart fresh sessions"
      },
      {
        "title": "Phase 5: The RPIV Workflow",
        "body": "Research → Plan → Implement → Validate"
      },
      {
        "title": "Step 1: Research",
        "body": "\"Read [files/docs/codebase]. Explain how [feature/module] works. Don't modify anything.\"\n\nPurpose: Load context. Catch misunderstandings before they cascade. AI explains back to you — if the explanation is wrong, the implementation will be wrong too."
      },
      {
        "title": "Step 2: Plan",
        "body": "\"Based on your understanding, write a plan:\n\nWhich files you'll create/modify\nWhat changes in each file\nWhat order you'll implement\nWhat could go wrong\"\n\nPurpose: Review the approach before committing to it. 10x cheaper to fix a plan than debug cascading implementation errors.\n\nPlan Review Checklist:\n\nDoes it touch files it shouldn't?\n Is the change order logical (types → utils → components → tests)?\n Are there missing files or steps?\n Does it respect existing patterns?\n Did it flag risks/unknowns?"
      },
      {
        "title": "Step 3: Implement",
        "body": "\"Proceed with the plan. Implement step by step. Stop after each file for me to verify.\"\n\nThe 200-Line Rule: If any single implementation step is >200 lines of changes, break it down further. Large changes = large bugs.\n\nCheckpoint System:\n\nAfter each file: quick scan for obvious issues\nAfter each feature: run tests\nAfter each milestone: manual test + commit"
      },
      {
        "title": "Step 4: Validate",
        "body": "\"Run the tests. Show me the output. If anything fails, explain why and fix it.\"\n\nThen manually verify:\n\nFeature works as specified\n Edge cases handled (empty state, max length, special chars)\n No console errors\n Mobile responsive (if UI)\n Existing features still work (regression check)"
      },
      {
        "title": "Phase 6: Architecture for Vibe Coding",
        "body": "AI generates better code when your architecture is clear and consistent."
      },
      {
        "title": "Recommended Project Structure",
        "body": "project/\n├── CLAUDE.md (or .cursorrules)     # AI rules\n├── README.md                        # What this is\n├── src/\n│   ├── types/                       # Shared types (AI reads these first)\n│   │   ├── index.ts\n│   │   └── [domain].ts\n│   ├── lib/                         # Pure utilities (no side effects)\n│   │   ├── [utility].ts\n│   │   └── [utility].test.ts\n│   ├── services/                    # External integrations (DB, API, etc.)\n│   │   ├── [service].ts\n│   │   └── [service].test.ts\n│   ├── components/ (or routes/)     # UI or route handlers\n│   │   ├── [Component]/\n│   │   │   ├── index.tsx\n│   │   │   ├── [Component].test.tsx\n│   │   │   └── [Component].module.css\n│   └── app/                         # App entry, layout, config\n├── tests/                           # Integration/E2E tests\n├── scripts/                         # Build/deploy/utility scripts\n└── docs/                            # Architecture decisions, API docs"
      },
      {
        "title": "Vibe-Friendly Patterns",
        "body": "Types first. Define your data shapes before anything else. AI uses these as contracts.\nSmall files. 300 lines max. AI handles small files better — fewer hallucinations, cleaner diffs.\nExplicit imports. No barrel exports (index.ts re-exports). AI gets confused by indirect imports.\nCo-located tests. thing.ts + thing.test.ts side by side. AI writes tests when they're right there.\nConfig in one place. Environment, feature flags, constants — one file AI can reference.\nDatabase schema as code. Drizzle/Prisma schema file = single source of truth AI can read."
      },
      {
        "title": "Schema-First Design",
        "body": "// src/types/todo.ts — AI reads this and understands your domain\nexport interface Todo {\n  id: string;          // UUID v4\n  title: string;       // 1-200 chars, trimmed\n  completed: boolean;  // default false\n  createdAt: Date;\n  updatedAt: Date;\n}\n\nexport interface CreateTodoInput {\n  title: string;       // Required, 1-200 chars\n}\n\nexport interface UpdateTodoInput {\n  title?: string;\n  completed?: boolean;\n}\n\n// This is ALL AI needs to implement CRUD operations correctly."
      },
      {
        "title": "The Vibe Testing Pyramid",
        "body": "/  E2E  \\        ← 10% (critical user flows only)\n        / Integration \\    ← 30% (API endpoints, DB queries)\n       /    Unit Tests  \\  ← 60% (pure functions, utils, logic)"
      },
      {
        "title": "Test-First Vibe Pattern",
        "body": "Prompt: \"Write tests for a function that validates email addresses.\nRequirements:\n- Returns true for valid emails\n- Returns false for empty string, missing @, missing domain\n- Handles edge cases: plus addressing, subdomains, international domains\nWrite ONLY the tests. I'll implement after.\"\n\nThen: \"Now implement the function to make all tests pass.\"\n\nThis pattern produces better code because AI has clear acceptance criteria."
      },
      {
        "title": "What to Test (Minimum Viable Testing)",
        "body": "CategoryTest?WhyPure functionsAlwaysEasy, high value, catches logic bugsData transformationsAlwaysWrong transforms corrupt data silentlyAPI endpointsAlwaysContract verificationUI componentsSometimesTest behavior, not implementationDatabase queriesSometimesTest complex queries, skip simple CRUDConfig/env loadingRarelyTest once, trust afterThird-party wrappersRarelyTest integration, not their code"
      },
      {
        "title": "When AI Tests Are Wrong",
        "body": "Signs of bad AI tests:\n\nTests that test the implementation, not the behavior\nTests that pass with any input (always return true)\nTests that mock everything (testing mocks, not code)\nSnapshot tests for everything (brittle, meaningless)\n\nFix: \"These tests mock too much. Write tests that exercise real behavior. Only mock external services (DB, API calls). Use in-memory alternatives where possible.\""
      },
      {
        "title": "The Error Paste Pattern",
        "body": "What Karpathy does: Copy error, paste with no comment, AI usually fixes it.\n\nWhen it works: Clear error messages, stack traces, type errors, syntax errors.\n\nWhen it doesn't (and what to do instead):\n\nSituationBetter PromptVague runtime error\"When I [action], [behavior] happens. Expected [expected]. Here's the relevant code: [paste]\"Silent failure\"This function returns [wrong result] for input [input]. Expected [expected]. Walk me through the logic step by step.\"Intermittent bug\"This works sometimes but fails with [condition]. I think it's a [race condition/state issue/timing problem]. Here's the code:\"Build/config errorPaste full error + your config files. \"Don't guess — check the config values against the docs.\"AI broke something while fixing\"Stop. Let's go back. The original issue was [X]. You introduced a new bug: [Y]. Let's fix the original issue without changing [Z].\""
      },
      {
        "title": "The 3-Strike Rule",
        "body": "If AI can't fix something in 3 attempts:\n\nStop. Don't keep asking the same thing.\nReframe. Describe the behavior you want, not the error.\nSimplify. Create a minimal reproduction case.\nStart fresh. New session, clean context.\nManual. Sometimes you need to read the code yourself."
      },
      {
        "title": "Recovery Playbooks",
        "body": "Spaghetti Code (AI made a mess)\n\n1. git stash (save current mess)\n2. git checkout [last good commit]\n3. Start a NEW AI session\n4. Paste only the requirements, not the broken code\n5. \"Implement this from scratch following these patterns: [your conventions]\"\n\nRecurring Bug (Fix breaks something else)\n\n1. Write a failing test for the bug\n2. Write regression tests for the things that keep breaking\n3. \"Make ALL these tests pass. Don't modify the tests.\"\n\nDependency Hell\n\n1. Check `package.json` / `requirements.txt` — AI sometimes adds conflicting deps\n2. \"List all dependencies you added and why each is needed\"\n3. Remove anything that duplicates existing functionality\n4. Lock versions: \"Pin all dependencies to exact versions\"\n\nContext Exhaustion (AI forgot earlier instructions)\n\n1. Start a new session\n2. Load rules file + key files\n3. Summarize what's done and what remains\n4. Continue with fresh context"
      },
      {
        "title": "Phase 9: Production Graduation Checklist",
        "body": "Before ANY vibe-coded project goes to production:"
      },
      {
        "title": "P0 — Security (Must fix)",
        "body": "No hardcoded secrets (grep for API keys, passwords, tokens)\n Input validation on all user inputs (XSS, SQL injection, path traversal)\n Authentication checks on protected routes\n Authorization: users can only access their own data\n HTTPS enforced\n Dependencies: npm audit / pip audit — zero critical/high\n Rate limiting on public endpoints\n CORS configured (not * in production)\n Error messages don't leak internals (no stack traces to users)"
      },
      {
        "title": "P1 — Performance (Should fix)",
        "body": "Database queries have indexes for common filters\n No N+1 queries (check ORM query logs)\n Images optimized (WebP, lazy load)\n Bundle size reasonable (<200KB initial JS)\n Loading states for async operations\n Pagination for list endpoints (no unbounded queries)"
      },
      {
        "title": "P2 — Reliability (Should fix)",
        "body": "Error handling: try/catch on all async operations\n Graceful degradation when services are down\n Health check endpoint\n Logging (structured, not console.log)\n Environment config via env vars (not hardcoded)\n Database migrations (not raw SQL)\n Backup strategy for data"
      },
      {
        "title": "P3 — Quality (Nice to have)",
        "body": "Test coverage >80%\n TypeScript strict mode / type hints everywhere\n Linter configured and clean\n README with setup instructions\n CI pipeline runs tests on push\n\nAI-Assisted Hardening Prompt:\n\n\"Review this codebase for production readiness. Check against this list: [paste checklist]. For each item, tell me: pass/fail/not applicable, and what to fix if fail. Be specific — file names and line numbers.\""
      },
      {
        "title": "Parallel AI Sessions",
        "body": "Run multiple AI sessions simultaneously:\n\nSession A: Implementing backend API\nSession B: Building frontend components\nSession C: Writing tests\n\nRules for parallel sessions:\n\nDefine interfaces/types FIRST (shared contract)\nEach session gets its own rules file section\nMerge via git (commit each session's work to a branch)\nIntegration test after merging"
      },
      {
        "title": "Pair Programming Patterns",
        "body": "Navigator-Driver (you navigate, AI drives)\n\nYou: \"We need to add pagination. The API should accept page and limit query params. Return items, total count, and hasNextPage.\"\nAI: [implements]\nYou: \"Good. Now add cursor-based pagination as an alternative. The cursor should be the last item's ID.\"\nAI: [implements]\n\nPing-Pong (alternate implementing)\n\nYou: Write the test\nAI: Write the implementation\nYou: Write the next test\nAI: Write the next implementation\n(TDD style — extremely effective)\n\nRubber Duck (AI explains, you catch issues)\n\n\"Walk me through this code line by line. Explain what each function does, what could go wrong, and what assumptions you're making.\"\n(AI explains → you catch bad assumptions before they become bugs)"
      },
      {
        "title": "Context Window Management",
        "body": "StrategyWhenHowFresh startEvery 15-20 turnsNew session, reload rules + key filesSummarizeBefore complex task\"Summarize what we've done. Then let's tackle [next thing].\"File focusLarge codebase\"Only look at src/services/auth.ts. Ignore everything else.\"Memory fileMulti-session projectKeep PROGRESS.md with what's done/remaining"
      },
      {
        "title": "Git Workflow for Vibe Coding",
        "body": "# Before starting\ngit checkout -b feature/[name]\ngit status  # clean working tree\n\n# During (commit often!)\ngit add -A && git commit -m \"feat: [what AI just implemented]\"\n# Every 2-3 AI turns, commit. Your safety net.\n\n# If things go wrong\ngit diff  # see what AI changed\ngit stash  # save mess\ngit checkout .  # nuclear option: discard all changes\n\n# When done\ngit diff main..HEAD  # review ALL changes before merging"
      },
      {
        "title": "Phase 11: Common Mistakes & How to Avoid Them",
        "body": "#MistakeConsequencePrevention1No rules fileAI reinvents conventions each sessionWrite rules file before first prompt2Prompting implementation before planCascading wrong assumptionsAlways: Research → Plan → Implement3Never reading AI's codeHidden bugs, security holes, debtReview at least critical paths4One giant promptAI loses focus, partial implementationOne task per prompt, sequential5Not committing frequentlyCan't rollback when AI breaks thingsCommit every 2-3 turns6Ignoring test failures\"It works on my machine\"Tests pass = done. Not before.7Letting AI add dependencies freelyBloated bundle, version conflicts\"Don't add deps without asking\" in rules8No production checklistShip security holesPhase 9 checklist before deploy9Marathon AI sessionsContext degrades, AI \"forgets\"Fresh session every 15-20 turns10Vibe coding auth/paymentsCritical bugs in critical pathsManual review for all security code11No types/schemaAI guesses data shapes differently each timeDefine types FIRST, always12Trusting AI's \"it works\"AI confidently ships broken codeVerify yourself. Run it. Test it.13Same prompt after 3 failuresAI stuck in a loopReframe, simplify, or do it manually14Mixing concerns in one sessionContext pollutionOne feature per session15No architecture guidanceAI creates inconsistent patternsDocument patterns in rules file"
      },
      {
        "title": "Phase 12: Weekly Effectiveness Tracking",
        "body": "Track your vibe coding quality over time:\n\nweek_of: \"YYYY-MM-DD\"\nsessions: [count]\nfeatures_shipped: [count]\nbugs_introduced: [count]  # found post-ship\nbugs_caught_in_review: [count]  # caught before ship\navg_prompts_per_feature: [count]\ntime_saved_estimate_hours: [number]\nfresh_session_restarts: [count]\n\n# Score yourself (1-5):\nprompt_quality: [1-5]      # Are you using Level 4+ prompts?\nreview_discipline: [1-5]   # Are you reviewing critical code?\ntesting_rigor: [1-5]       # Are you testing before shipping?\narchitecture: [1-5]        # Is the codebase staying clean?\ncommit_frequency: [1-5]    # Are you committing every 2-3 turns?\n\ntotal_score: [5-25]\n\nScoreRatingAction20-25EliteYou're a vibe coding conductor. Teach others.15-19SolidGood habits. Focus on weakest dimension.10-14LearningReview this guide weekly. Build the habits.5-9RiskySlow down. More planning, more testing, more review."
      },
      {
        "title": "The 10 Commandments of Vibe Coding",
        "body": "Types first. Define your data before writing logic.\nRules file always. No rules = no consistency.\nPlan before implement. 5 minutes planning saves 5 hours debugging.\nOne task per prompt. Focus = quality.\nCommit after every win. Git is your safety net.\nTest the critical path. At minimum: happy path + one edge case.\nFresh sessions. Don't let context rot.\nReview security code. Auth, payments, data access — always manual review.\n200-line rule. If a change is bigger, break it down.\nKnow when to stop vibing. If AI can't fix it in 3 tries, change approach."
      },
      {
        "title": "Quick Reference Commands",
        "body": "\"Read [files] and explain the architecture. Don't change anything.\"\n\"Write a plan for [feature]. List files to create/modify and changes in each.\"\n\"Implement only [specific thing]. Don't touch other files.\"\n\"Write tests first for [requirements]. Then implement to pass them.\"\n\"Review this for [security/performance/readability]. Be specific.\"\n\"This error occurs when [action]. Expected [behavior]. Here's the code: [paste]\"\n\"Refactor [file] to [goal]. Same behavior. Don't add features.\"\n\"What dependencies did you add and why? Remove anything unnecessary.\"\n\"Walk me through this code. Explain assumptions and potential issues.\"\n\"Stop. The original issue was [X]. Let's start fresh with a minimal approach.\"\n\"Run all tests. If any fail, fix them without breaking other tests.\"\n\"Check this against the production checklist: [paste P0-P3 items].\"\n\nBuilt by AfrexAI — the team that ships AI agents, not just AI prompts."
      }
    ],
    "body": "Vibe Coding Mastery\n\nThe complete system for building software with AI — from zero to production. Not tips. Not theory. A full operating methodology.\n\nWhat is vibe coding? Programming where you describe what you want and let AI generate code. You evaluate by results, not by reading every line. Coined by Andrej Karpathy (Feb 2025).\n\nKey distinction (Simon Willison): If you review, test, and explain the code — that's AI-assisted software development. Vibe coding means accepting AI output without fully understanding every function. This skill covers both modes and the spectrum between them.\n\nPhase 1: When to Vibe (Decision Matrix)\n\nBefore starting, classify your project:\n\nFactor\tVibe ✅\tDon't Vibe ❌\nStakes\tLow (prototype, internal, learning)\tHigh (payments, auth, compliance)\nTimeline\tHours to days\tMonths+\nTeam size\tSolo or pair\tLarge team with standards\nDomain knowledge\tYou understand the domain\tUnfamiliar territory\nReversibility\tEasy to rewrite\tHard to change later\nData sensitivity\tPublic/test data\tPII, financial, health\n\nScoring: Count ✅ checks.\n\n5-6: Full vibe mode. Ship fast.\n3-4: Vibe with guardrails. Review critical paths.\n1-2: AI-assisted development, not vibe coding. 
Review everything.\n0: Write it yourself or hire someone who understands the domain.\nVibe Coding Maturity Levels\nLevel\tDescription\tWho\nL1 — Passenger\tCopy-paste AI output, hope it works\tBeginners\nL2 — Navigator\tGuide AI with context, catch obvious errors\tIntermediate\nL3 — Pilot\tArchitecture decisions, AI implements, you review\tExperienced devs\nL4 — Conductor\tOrchestrate multiple AI sessions, parallel streams\tPower users\n\nTarget: L3 minimum for anything going to production.\n\nPhase 2: Tool Selection\nPrimary Tools Matrix\nTool\tBest For\tContext Window\tMulti-file\tTerminal\tCost\nClaude Code\tFull-stack, complex refactors, CLI\t200K\tExcellent\tNative\tAPI usage\nCursor\tEditor-integrated, rapid iteration\t128K\tGood\tVia terminal\t$20/mo + API\nWindsurf\tBeginner-friendly, guided flows\t128K\tGood\tLimited\t$10/mo + API\nGitHub Copilot\tInline completions, small edits\t8-32K\tLimited\tNo\t$10-19/mo\nAider\tGit-aware, open source, CLI\tVaries\tGood\tNative\tAPI only\nCline (VS Code)\tVS Code native, plan mode\tVaries\tGood\tVia terminal\tAPI only\nMulti-Tool Strategy\n\nUse tools in combination:\n\nArchitecture & planning → Claude Code or Claude chat (largest context, best reasoning)\nImplementation → Cursor or Claude Code (fast iteration, multi-file edits)\nQuick fixes & completions → Copilot (inline, zero friction)\nCode review → Claude chat (paste diffs, get thorough review)\nPhase 3: Rules Files (Your Persistent Context)\n\nRules files teach AI your conventions once. 
Without them, every session starts from zero.\n\nUniversal Rules File Template\n# Project Rules\n\n## Stack\n- Language: [TypeScript/Python/Go/etc.]\n- Framework: [Next.js/FastAPI/etc.]\n- Database: [PostgreSQL/SQLite/etc.]\n- Styling: [Tailwind/CSS Modules/etc.]\n- Package manager: [pnpm/npm/poetry/etc.]\n\n## Code Style\n- Max function length: 50 lines\n- Max file length: 300 lines\n- One export per file (prefer)\n- Use [const/let, never var] / [type hints always]\n- Error handling: [explicit try/catch, never swallow errors]\n- Naming: [camelCase functions, PascalCase components, UPPER_SNAKE constants]\n\n## Architecture\n- File structure: [describe or reference]\n- API pattern: [REST/tRPC/GraphQL]\n- State management: [Zustand/Redux/signals/etc.]\n- Auth pattern: [JWT/session/OAuth provider]\n\n## Testing\n- Framework: [Vitest/Jest/Pytest/etc.]\n- Minimum coverage: [80%/90%/etc.]\n- Test file location: [co-located/__tests__/tests/]\n- Run before committing: [command]\n\n## Do NOT\n- Do not use `any` type in TypeScript\n- Do not install new dependencies without asking\n- Do not modify database schema without migration\n- Do not hardcode secrets, URLs, or config values\n- Do not remove existing tests\n\n## When Unsure\n- Ask before making architectural decisions\n- Show the plan before implementing changes >100 lines\n- Flag security-adjacent code for manual review\n\nWhere to Put It\nTool\tFile\tNotes\nClaude Code\tCLAUDE.md in repo root\tAlso reads .claude/ directory\nCursor\t.cursor/rules/*.mdc\tSupports conditional rules with globs\nWindsurf\t.windsurfrules in repo root\tSingle file\nAider\t.aider.conf.yml + conventions in chat\tYAML config + initial prompt\nGeneric\tAGENTS.md or CONVENTIONS.md\tAny tool can be told to read it\nCursor Conditional Rules (.mdc)\n---\ndescription: React component standards\nglobs: src/components/**/*.tsx\nalwaysApply: false\n---\n\n# Component Rules\n- Functional components only (no class components)\n- Props interface above 
component, named [Component]Props\n- Use forwardRef for components that accept ref\n- Co-locate styles in [component].module.css\n- Co-locate tests in [component].test.tsx\n- Export component as named export, not default\n\nRules File Quality Checklist\n Stack and versions specified\n File/function size limits defined\n Naming conventions documented\n \"Do NOT\" section with common AI mistakes\n Testing expectations clear\n Architecture patterns described or referenced\n Security-sensitive areas flagged\n Dependencies policy stated\nPhase 4: Prompt Engineering for Code\nThe 5-Level Prompt Quality Hierarchy\n\nLevel 1 — Wish (bad)\n\n\"Build a todo app\"\n\nLevel 2 — Request (okay)\n\n\"Build a todo app with React and Tailwind\"\n\nLevel 3 — Specification (good)\n\n\"Build a todo app: React 18, TypeScript, Tailwind. Features: add/edit/delete/toggle todos. Store in localStorage. Responsive. Under 200 lines total.\"\n\nLevel 4 — Brief (great)\n\n\"Build a todo app. Here's the spec:\n\nStack: React 18 + TS + Tailwind + Vite\nFeatures: CRUD todos, toggle complete, filter (all/active/done), persist to localStorage\nConstraints: Single component file, under 200 lines, no external deps beyond stack\nDone when: All features work, page refresh preserves state, mobile responsive\nStart with the data types, then build up.\"\n\nLevel 5 — Contract (production-grade)\n\ntask: Todo application\nstack:\n  runtime: React 18 + TypeScript strict\n  styling: Tailwind CSS 3.x\n  build: Vite 5\n  test: Vitest + Testing Library\nfeatures:\n  - CRUD operations on todos\n  - Toggle completion status\n  - Filter: all | active | completed\n  - Bulk actions: complete all, clear completed\n  - Persist to localStorage with versioned schema\nconstraints:\n  - Max 3 component files\n  - Max 200 lines per file\n  - No external state management library\n  - Keyboard accessible (tab, enter, escape)\n  - Mobile responsive (min 320px)\nacceptance:\n  - All features functional\n  - Page refresh 
preserves state\n  - 90%+ test coverage\n  - No TypeScript errors (strict mode)\n  - Lighthouse accessibility score > 90\napproach: Start with types/interfaces, then hooks, then components, then tests.\n\n12 Proven Prompt Patterns\nScaffolding: \"Create the project structure with empty files and type definitions. Don't implement yet.\"\nIncremental: \"Implement only [specific function]. Don't touch other files.\"\nExplain-then-build: \"Explain how you'd architect this, then implement after I approve.\"\nTest-first: \"Write the tests first based on these requirements. Then implement to make them pass.\"\nRefactor: \"Refactor [file] to [goal]. Keep the same behavior. Don't add features.\"\nDebug: [paste error] \"This happens when [action]. Expected [behavior]. The relevant code is in [files].\"\nReview: \"Review this code for [security/performance/readability]. Be specific about issues and fixes.\"\nMigrate: \"Convert this from [old pattern] to [new pattern]. Show me the plan first.\"\nDocument: \"Add JSDoc/docstrings to all public functions in [file]. Include param types and examples.\"\nOptimize: \"This function is slow with >10K items. Profile, identify bottleneck, optimize. Keep same API.\"\nParallel session: \"Read [these files] and summarize the architecture. Don't change anything.\"\nRecovery: \"The codebase is in a broken state. [describe symptoms]. 
Help me understand what went wrong before we fix it.\"\nAnti-Patterns (What NOT to Prompt)\nAnti-Pattern\tWhy It Fails\tFix\n\"Build me an app\"\tToo vague, AI guesses everything\tUse Level 4+ prompts\n\"Fix it\" (no context)\tAI doesn't know what \"it\" is\tPaste error + expected behavior\n\"Rewrite everything\"\tNukes working code, introduces regressions\tIncremental refactors\n\"Make it better\"\tSubjective, AI changes random things\tSpecify what \"better\" means\n\"Use best practices\"\tAI's \"best practices\" may not match your stack\tSpecify the practices you want\nMultiple unrelated asks\tContext bleed, partial implementations\tOne task per prompt\nLong conversation chains\tContext degrades after 10+ turns\tStart fresh sessions\nPhase 5: The RPIV Workflow\n\nResearch → Plan → Implement → Validate\n\nStep 1: Research\n\n\"Read [files/docs/codebase]. Explain how [feature/module] works. Don't modify anything.\"\n\nPurpose: Load context. Catch misunderstandings before they cascade. AI explains back to you — if the explanation is wrong, the implementation will be wrong too.\n\nStep 2: Plan\n\n\"Based on your understanding, write a plan:\n\nWhich files you'll create/modify\nWhat changes in each file\nWhat order you'll implement\nWhat could go wrong\"\n\nPurpose: Review the approach before committing to it. 10x cheaper to fix a plan than debug cascading implementation errors.\n\nPlan Review Checklist:\n\n Does it touch files it shouldn't?\n Is the change order logical (types → utils → components → tests)?\n Are there missing files or steps?\n Does it respect existing patterns?\n Did it flag risks/unknowns?\nStep 3: Implement\n\n\"Proceed with the plan. Implement step by step. Stop after each file for me to verify.\"\n\nThe 200-Line Rule: If any single implementation step is >200 lines of changes, break it down further. 
Large changes = large bugs.\n\nCheckpoint System:\n\nAfter each file: quick scan for obvious issues\nAfter each feature: run tests\nAfter each milestone: manual test + commit\nStep 4: Validate\n\n\"Run the tests. Show me the output. If anything fails, explain why and fix it.\"\n\nThen manually verify:\n\n Feature works as specified\n Edge cases handled (empty state, max length, special chars)\n No console errors\n Mobile responsive (if UI)\n Existing features still work (regression check)\nPhase 6: Architecture for Vibe Coding\n\nAI generates better code when your architecture is clear and consistent.\n\nRecommended Project Structure\nproject/\n├── CLAUDE.md (or .cursorrules)     # AI rules\n├── README.md                        # What this is\n├── src/\n│   ├── types/                       # Shared types (AI reads these first)\n│   │   ├── index.ts\n│   │   └── [domain].ts\n│   ├── lib/                         # Pure utilities (no side effects)\n│   │   ├── [utility].ts\n│   │   └── [utility].test.ts\n│   ├── services/                    # External integrations (DB, API, etc.)\n│   │   ├── [service].ts\n│   │   └── [service].test.ts\n│   ├── components/ (or routes/)     # UI or route handlers\n│   │   ├── [Component]/\n│   │   │   ├── index.tsx\n│   │   │   ├── [Component].test.tsx\n│   │   │   └── [Component].module.css\n│   └── app/                         # App entry, layout, config\n├── tests/                           # Integration/E2E tests\n├── scripts/                         # Build/deploy/utility scripts\n└── docs/                            # Architecture decisions, API docs\n\nVibe-Friendly Patterns\nTypes first. Define your data shapes before anything else. AI uses these as contracts.\nSmall files. 300 lines max. AI handles small files better — fewer hallucinations, cleaner diffs.\nExplicit imports. No barrel exports (index.ts re-exports). AI gets confused by indirect imports.\nCo-located tests. thing.ts + thing.test.ts side by side. 
AI writes tests when they're right there.\nConfig in one place. Environment, feature flags, constants — one file AI can reference.\nDatabase schema as code. Drizzle/Prisma schema file = single source of truth AI can read.\nSchema-First Design\n// src/types/todo.ts — AI reads this and understands your domain\nexport interface Todo {\n  id: string;          // UUID v4\n  title: string;       // 1-200 chars, trimmed\n  completed: boolean;  // default false\n  createdAt: Date;\n  updatedAt: Date;\n}\n\nexport interface CreateTodoInput {\n  title: string;       // Required, 1-200 chars\n}\n\nexport interface UpdateTodoInput {\n  title?: string;\n  completed?: boolean;\n}\n\n// This is ALL AI needs to implement CRUD operations correctly.\n\nPhase 7: Testing in Vibe Mode\nThe Vibe Testing Pyramid\n         /  E2E  \\        ← 10% (critical user flows only)\n        / Integration \\    ← 30% (API endpoints, DB queries)\n       /    Unit Tests  \\  ← 60% (pure functions, utils, logic)\n\nTest-First Vibe Pattern\nPrompt: \"Write tests for a function that validates email addresses.\nRequirements:\n- Returns true for valid emails\n- Returns false for empty string, missing @, missing domain\n- Handles edge cases: plus addressing, subdomains, international domains\nWrite ONLY the tests. 
I'll implement after.\"\n\n\nThen: \"Now implement the function to make all tests pass.\"\n\nThis pattern produces better code because AI has clear acceptance criteria.\n\nWhat to Test (Minimum Viable Testing)\nCategory\tTest?\tWhy\nPure functions\tAlways\tEasy, high value, catches logic bugs\nData transformations\tAlways\tWrong transforms corrupt data silently\nAPI endpoints\tAlways\tContract verification\nUI components\tSometimes\tTest behavior, not implementation\nDatabase queries\tSometimes\tTest complex queries, skip simple CRUD\nConfig/env loading\tRarely\tTest once, trust after\nThird-party wrappers\tRarely\tTest integration, not their code\nWhen AI Tests Are Wrong\n\nSigns of bad AI tests:\n\nTests that test the implementation, not the behavior\nTests that pass with any input (always return true)\nTests that mock everything (testing mocks, not code)\nSnapshot tests for everything (brittle, meaningless)\n\nFix: \"These tests mock too much. Write tests that exercise real behavior. Only mock external services (DB, API calls). Use in-memory alternatives where possible.\"\n\nPhase 8: Debugging with AI\nThe Error Paste Pattern\n\nWhat Karpathy does: Copy error, paste with no comment, AI usually fixes it.\n\nWhen it works: Clear error messages, stack traces, type errors, syntax errors.\n\nWhen it doesn't (and what to do instead):\n\nSituation\tBetter Prompt\nVague runtime error\t\"When I [action], [behavior] happens. Expected [expected]. Here's the relevant code: [paste]\"\nSilent failure\t\"This function returns [wrong result] for input [input]. Expected [expected]. Walk me through the logic step by step.\"\nIntermittent bug\t\"This works sometimes but fails with [condition]. I think it's a [race condition/state issue/timing problem]. Here's the code:\"\nBuild/config error\tPaste full error + your config files. \"Don't guess — check the config values against the docs.\"\nAI broke something while fixing\t\"Stop. Let's go back. The original issue was [X]. 
You introduced a new bug: [Y]. Let's fix the original issue without changing [Z].\"\nThe 3-Strike Rule\n\nIf AI can't fix something in 3 attempts:\n\nStop. Don't keep asking the same thing.\nReframe. Describe the behavior you want, not the error.\nSimplify. Create a minimal reproduction case.\nStart fresh. New session, clean context.\nManual. Sometimes you need to read the code yourself.\nRecovery Playbooks\n\nSpaghetti Code (AI made a mess)\n\n1. git stash (save current mess)\n2. git checkout [last good commit]\n3. Start a NEW AI session\n4. Paste only the requirements, not the broken code\n5. \"Implement this from scratch following these patterns: [your conventions]\"\n\n\nRecurring Bug (Fix breaks something else)\n\n1. Write a failing test for the bug\n2. Write regression tests for the things that keep breaking\n3. \"Make ALL these tests pass. Don't modify the tests.\"\n\n\nDependency Hell\n\n1. Check `package.json` / `requirements.txt` — AI sometimes adds conflicting deps\n2. \"List all dependencies you added and why each is needed\"\n3. Remove anything that duplicates existing functionality\n4. Lock versions: \"Pin all dependencies to exact versions\"\n\n\nContext Exhaustion (AI forgot earlier instructions)\n\n1. Start a new session\n2. Load rules file + key files\n3. Summarize what's done and what remains\n4. 
Continue with fresh context\n\nPhase 9: Production Graduation Checklist\n\nBefore ANY vibe-coded project goes to production:\n\nP0 — Security (Must fix)\n No hardcoded secrets (grep for API keys, passwords, tokens)\n Input validation on all user inputs (XSS, SQL injection, path traversal)\n Authentication checks on protected routes\n Authorization: users can only access their own data\n HTTPS enforced\n Dependencies: npm audit / pip audit — zero critical/high\n Rate limiting on public endpoints\n CORS configured (not * in production)\n Error messages don't leak internals (no stack traces to users)\nP1 — Performance (Should fix)\n Database queries have indexes for common filters\n No N+1 queries (check ORM query logs)\n Images optimized (WebP, lazy load)\n Bundle size reasonable (<200KB initial JS)\n Loading states for async operations\n Pagination for list endpoints (no unbounded queries)\nP2 — Reliability (Should fix)\n Error handling: try/catch on all async operations\n Graceful degradation when services are down\n Health check endpoint\n Logging (structured, not console.log)\n Environment config via env vars (not hardcoded)\n Database migrations (not raw SQL)\n Backup strategy for data\nP3 — Quality (Nice to have)\n Test coverage >80%\n TypeScript strict mode / type hints everywhere\n Linter configured and clean\n README with setup instructions\n CI pipeline runs tests on push\n\nAI-Assisted Hardening Prompt:\n\n\"Review this codebase for production readiness. Check against this list: [paste checklist]. For each item, tell me: pass/fail/not applicable, and what to fix if fail. 
Be specific — file names and line numbers.\"\n\nPhase 10: Advanced Patterns\nParallel AI Sessions\n\nRun multiple AI sessions simultaneously:\n\nSession A: Implementing backend API\nSession B: Building frontend components\nSession C: Writing tests\n\nRules for parallel sessions:\n\nDefine interfaces/types FIRST (shared contract)\nEach session gets its own rules file section\nMerge via git (commit each session's work to a branch)\nIntegration test after merging\nPair Programming Patterns\n\nNavigator-Driver (you navigate, AI drives)\n\nYou: \"We need to add pagination. The API should accept page and limit query params. Return items, total count, and hasNextPage.\" AI: [implements] You: \"Good. Now add cursor-based pagination as an alternative. The cursor should be the last item's ID.\" AI: [implements]\n\nPing-Pong (alternate implementing)\n\nYou: Write the test AI: Write the implementation You: Write the next test AI: Write the next implementation (TDD style — extremely effective)\n\nRubber Duck (AI explains, you catch issues)\n\n\"Walk me through this code line by line. Explain what each function does, what could go wrong, and what assumptions you're making.\" (AI explains → you catch bad assumptions before they become bugs)\n\nContext Window Management\nStrategy\tWhen\tHow\nFresh start\tEvery 15-20 turns\tNew session, reload rules + key files\nSummarize\tBefore complex task\t\"Summarize what we've done. Then let's tackle [next thing].\"\nFile focus\tLarge codebase\t\"Only look at src/services/auth.ts. Ignore everything else.\"\nMemory file\tMulti-session project\tKeep PROGRESS.md with what's done/remaining\nGit Workflow for Vibe Coding\n# Before starting\ngit checkout -b feature/[name]\ngit status  # clean working tree\n\n# During (commit often!)\ngit add -A && git commit -m \"feat: [what AI just implemented]\"\n# Every 2-3 AI turns, commit. Your safety net.\n\n# If things go wrong\ngit diff  # see what AI changed\ngit stash  # save mess\ngit checkout .  
# nuclear option: discard all changes\n\n# When done\ngit diff main..HEAD  # review ALL changes before merging\n\nPhase 11: Common Mistakes & How to Avoid Them\n#\tMistake\tConsequence\tPrevention\n1\tNo rules file\tAI reinvents conventions each session\tWrite rules file before first prompt\n2\tPrompting implementation before plan\tCascading wrong assumptions\tAlways: Research → Plan → Implement\n3\tNever reading AI's code\tHidden bugs, security holes, debt\tReview at least critical paths\n4\tOne giant prompt\tAI loses focus, partial implementation\tOne task per prompt, sequential\n5\tNot committing frequently\tCan't rollback when AI breaks things\tCommit every 2-3 turns\n6\tIgnoring test failures\t\"It works on my machine\"\tTests pass = done. Not before.\n7\tLetting AI add dependencies freely\tBloated bundle, version conflicts\t\"Don't add deps without asking\" in rules\n8\tNo production checklist\tShip security holes\tPhase 9 checklist before deploy\n9\tMarathon AI sessions\tContext degrades, AI \"forgets\"\tFresh session every 15-20 turns\n10\tVibe coding auth/payments\tCritical bugs in critical paths\tManual review for all security code\n11\tNo types/schema\tAI guesses data shapes differently each time\tDefine types FIRST, always\n12\tTrusting AI's \"it works\"\tAI confidently ships broken code\tVerify yourself. Run it. 
Test it.\n13\tSame prompt after 3 failures\tAI stuck in a loop\tReframe, simplify, or do it manually\n14\tMixing concerns in one session\tContext pollution\tOne feature per session\n15\tNo architecture guidance\tAI creates inconsistent patterns\tDocument patterns in rules file\nPhase 12: Weekly Effectiveness Tracking\n\nTrack your vibe coding quality over time:\n\nweek_of: \"YYYY-MM-DD\"\nsessions: [count]\nfeatures_shipped: [count]\nbugs_introduced: [count]  # found post-ship\nbugs_caught_in_review: [count]  # caught before ship\navg_prompts_per_feature: [count]\ntime_saved_estimate_hours: [number]\nfresh_session_restarts: [count]\n\n# Score yourself (1-5):\nprompt_quality: [1-5]      # Are you using Level 4+ prompts?\nreview_discipline: [1-5]   # Are you reviewing critical code?\ntesting_rigor: [1-5]       # Are you testing before shipping?\narchitecture: [1-5]        # Is the codebase staying clean?\ncommit_frequency: [1-5]    # Are you committing every 2-3 turns?\n\ntotal_score: [5-25]\n\nScore\tRating\tAction\n20-25\tElite\tYou're a vibe coding conductor. Teach others.\n15-19\tSolid\tGood habits. Focus on weakest dimension.\n10-14\tLearning\tReview this guide weekly. Build the habits.\n5-9\tRisky\tSlow down. More planning, more testing, more review.\nThe 10 Commandments of Vibe Coding\nTypes first. Define your data before writing logic.\nRules file always. No rules = no consistency.\nPlan before implement. 5 minutes planning saves 5 hours debugging.\nOne task per prompt. Focus = quality.\nCommit after every win. Git is your safety net.\nTest the critical path. At minimum: happy path + one edge case.\nFresh sessions. Don't let context rot.\nReview security code. Auth, payments, data access — always manual review.\n200-line rule. If a change is bigger, break it down.\nKnow when to stop vibing. If AI can't fix it in 3 tries, change approach.\nQuick Reference Commands\n\"Read [files] and explain the architecture. 
Don't change anything.\"\n\"Write a plan for [feature]. List files to create/modify and changes in each.\"\n\"Implement only [specific thing]. Don't touch other files.\"\n\"Write tests first for [requirements]. Then implement to pass them.\"\n\"Review this for [security/performance/readability]. Be specific.\"\n\"This error occurs when [action]. Expected [behavior]. Here's the code: [paste]\"\n\"Refactor [file] to [goal]. Same behavior. Don't add features.\"\n\"What dependencies did you add and why? Remove anything unnecessary.\"\n\"Walk me through this code. Explain assumptions and potential issues.\"\n\"Stop. The original issue was [X]. Let's start fresh with a minimal approach.\"\n\"Run all tests. If any fail, fix them without breaking other tests.\"\n\"Check this against the production checklist: [paste P0-P3 items].\"\n\n\nBuilt by AfrexAI — the team that ships AI agents, not just AI prompts."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/1kalin/afrexai-vibe-coding",
    "publisherUrl": "https://clawhub.ai/1kalin/afrexai-vibe-coding",
    "owner": "1kalin",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/afrexai-vibe-coding",
    "downloadUrl": "https://openagent3.xyz/downloads/afrexai-vibe-coding",
    "agentUrl": "https://openagent3.xyz/skills/afrexai-vibe-coding/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-vibe-coding/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-vibe-coding/agent.md"
  }
}