{
  "schemaVersion": "1.0",
  "item": {
    "slug": "solo-review",
    "name": "Review",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/fortunto2/solo-review",
    "canonicalUrl": "https://clawhub.ai/fortunto2/solo-review",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/solo-review",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=solo-review",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "solo-review",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-08T06:41:41.869Z",
      "expiresAt": "2026-05-15T06:41:41.869Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=solo-review",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=solo-review",
        "contentDisposition": "attachment; filename=\"solo-review-1.1.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "solo-review"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/solo-review"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/solo-review",
    "agentPageUrl": "https://openagent3.xyz/skills/solo-review/agent",
    "manifestUrl": "https://openagent3.xyz/skills/solo-review/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/solo-review/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "/review",
        "body": "This skill is self-contained — follow the instructions below instead of delegating to external review skills (superpowers, etc.) or spawning Task subagents. Run all checks directly.\n\nFinal quality gate before shipping. Runs tests, checks security, verifies acceptance criteria from spec.md, audits code quality, and generates a ship-ready report with go/no-go verdict."
      },
      {
        "title": "When to use",
        "body": "After /deploy (or /build if deploying manually). This is the quality gate.\n\nPipeline: /deploy → /review\n\nCan also be used standalone: /review on any project to audit code quality."
      },
      {
        "title": "MCP Tools (use if available)",
        "body": "session_search(query) — find past review patterns and common issues\nproject_code_search(query, project) — find similar code patterns across projects\ncodegraph_query(query) — check dependencies, imports, unused code\n\nIf MCP tools are not available, fall back to Glob + Grep + Read."
      },
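      {
        "title": "Fallback search sketch",
        "body": "If the MCP tools above are unavailable, plain shell search covers the basics. A minimal sketch; the module and function names (stripe, createCheckoutSession) are placeholders, not part of this skill:\n\n# Rough stand-in for codegraph_query: which files import a given module?\ngrep -rln \"import .*stripe\" src/ app/ lib/ 2>/dev/null\n\n# Rough stand-in for project_code_search: where does a pattern appear?\ngrep -rn \"createCheckoutSession\" . --include=\"*.ts\" 2>/dev/null | head -10"
      },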
      {
        "title": "1. Architecture overview (if MCP available)",
        "body": "codegraph_explain(project=\"{project name}\")\n\nReturns: stack, languages, directory layers, key patterns, top dependencies, hub files. Use this to detect stack and understand project structure."
      },
      {
        "title": "2. Essential docs (parallel reads)",
        "body": "CLAUDE.md — architecture, Do/Don't rules\ndocs/plan/*/spec.md — acceptance criteria to verify (REQUIRED)\ndocs/plan/*/plan.md — task completion status (REQUIRED)\ndocs/workflow.md — TDD policy, quality standards, integration testing commands (if exists)\n\nDo NOT read source code at this stage. Only docs."
      },
      {
        "title": "3. Detect stack",
        "body": "Use stack from codegraph_explain response (or CLAUDE.md if no MCP) to choose tools:\n\nNext.js → npm run build, npm test, npx next lint\nPython → uv run pytest, uv run ruff check\nSwift → swift test, swiftlint\nKotlin → ./gradlew test, ./gradlew lint"
      },
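      {
        "title": "Stack detection sketch",
        "body": "When neither codegraph_explain nor CLAUDE.md names the stack, marker files are a reasonable fallback. A sketch; the marker-file-to-stack mapping is an assumption, not mandated by this skill:\n\n# Hypothetical marker-file detection\n[ -f package.json ] && echo \"node\"\n{ [ -f pyproject.toml ] || [ -f requirements.txt ]; } && echo \"python\"\n[ -f Package.swift ] && echo \"swift\"\n{ [ -f build.gradle ] || [ -f build.gradle.kts ]; } && echo \"kotlin\""
      },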
      {
        "title": "4. Smart source code loading (for code quality spot check)",
        "body": "Do NOT read random source files. Use the graph to find the most important code:\n\ncodegraph_query(\"MATCH (f:File {project: '{name}'})-[e]-() RETURN f.path, COUNT(e) AS edges ORDER BY edges DESC LIMIT 5\")\n\nRead only the top 3-5 hub files (most connected = most impactful). For security checks, use Grep with narrow patterns (sk_live, password\\s*=) — not full file reads."
      },
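      {
        "title": "Hub file fallback sketch",
        "body": "Without the code graph, import frequency is a crude proxy for hub files. A sketch; both the heuristic and the ES-style import pattern are assumptions, not part of this skill:\n\n# Most-referenced modules across the codebase (rough hub detection)\ngrep -rhoE \"from ['\\\"][./a-zA-Z0-9_-]+['\\\"]\" src/ app/ lib/ 2>/dev/null | sort | uniq -c | sort -rn | head -5"
      },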
      {
        "title": "Review Dimensions",
        "body": "Makefile convention: If Makefile exists in project root, always prefer make targets over raw commands. Use make test instead of npm test, make lint instead of pnpm lint, make build instead of pnpm build. Run make help (or read Makefile) to discover available targets including integration tests.\n\nRun all 12 dimensions in sequence. Report findings per dimension."
      },
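      {
        "title": "Makefile preference sketch",
        "body": "One way to encode the Makefile convention in shell. A sketch; it assumes a test target exists (confirm with make help first):\n\n# Prefer make targets when a Makefile exists; otherwise fall back to the stack command\nif [ -f Makefile ]; then\n  make test 2>&1 || true\nelse\n  npm test 2>&1 || true  # substitute the detected stack's test command\nfi"
      },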
      {
        "title": "1. Test Suite",
        "body": "Run the full test suite (prefer make test if Makefile exists):\n\n# If Makefile exists — use it\nmake test 2>&1 || true\n\n# Fallback: Next.js / Node\nnpm test -- --coverage 2>&1 || true\n\n# Python\nuv run pytest --tb=short -q 2>&1 || true\n\n# Swift\nswift test 2>&1 || true\n\nReport:\n\nTotal tests: pass / fail / skip\nCoverage percentage (if available)\nAny failing tests with file:line references\n\nIntegration tests — if docs/workflow.md has an \"Integration Testing\" section, run the specified commands:\n\nExecute the CLI/integration commands listed there\nVerify exit code 0 and expected output format\nReport: command run, exit code, pass/fail"
      },
      {
        "title": "2. Linter & Type Check",
        "body": "# Next.js\npnpm lint 2>&1 || true\npnpm tsc --noEmit 2>&1 || true\n\n# Python\nuv run ruff check . 2>&1 || true\nuv run ty check . 2>&1 || true\n\n# Swift\nswiftlint lint --strict 2>&1 || true\n\n# Kotlin\n./gradlew detekt 2>&1 || true\n./gradlew ktlintCheck 2>&1 || true\n\nReport: warnings count, errors count, top issues."
      },
      {
        "title": "3. Build Verification",
        "body": "# Next.js\nnpm run build 2>&1 || true\n\n# Python\nuv run python -m py_compile src/**/*.py 2>&1 || true\n\n# Astro\nnpm run build 2>&1 || true\n\nReport: build success/failure, any warnings."
      },
      {
        "title": "4. Security Audit",
        "body": "Dependency vulnerabilities:\n\n# Node\nnpm audit --audit-level=moderate 2>&1 || true\n\n# Python\nuv run pip-audit 2>&1 || true\n\nCode-level checks (Grep for common issues):\n\nHardcoded secrets: grep -rn \"sk_live\\|sk_test\\|password\\s*=\\s*['\\\"]\" src/ app/ lib/\nSQL injection: look for string concatenation in queries\nXSS: look for dangerouslySetInnerHTML without sanitization\nExposed env vars: check .gitignore includes .env*\n\nReport: vulnerabilities found, severity levels."
      },
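      {
        "title": "Secret scan sketch",
        "body": "The hardcoded-secret greps above can run as one loop. A sketch; the pattern list is illustrative, not exhaustive:\n\n# Scan common source directories for each secret pattern\nfor pat in \"sk_live\" \"sk_test\" \"password[[:space:]]*=\"; do\n  grep -rnE \"$pat\" src/ app/ lib/ 2>/dev/null | head -5\ndone"
      },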
      {
        "title": "5. Acceptance Criteria Verification",
        "body": "Read docs/plan/*/spec.md and check each acceptance criterion:\n\nFor each - [ ] criterion in spec.md:\n\nSearch codebase for evidence it was implemented.\nCheck if related tests exist.\nMark as verified or flag as missing.\n\nUpdate spec.md checkboxes. After verifying each criterion, use Edit tool to change - [ ] to - [x] in spec.md. Leaving verified criteria unchecked causes staleness across pipeline runs — check them off as you go.\n\nAcceptance Criteria:\n  - [x] User can sign up with email — found in app/auth/signup/page.tsx + test\n  - [x] Dashboard shows project list — found in app/dashboard/page.tsx\n  - [ ] Stripe checkout works — route exists but no test coverage\n\nAfter updating checkboxes, commit: git add docs/plan/*/spec.md && git commit -m \"docs: update spec checkboxes (verified by review)\""
      },
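      {
        "title": "Unchecked criteria sketch",
        "body": "Before editing checkboxes, list the criteria still awaiting verification. A sketch, assuming the - [ ] convention shown above:\n\n# Unchecked acceptance criteria with file:line references\ngrep -n -- '- \\[ \\]' docs/plan/*/spec.md 2>/dev/null || echo \"No unchecked criteria\""
      },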
      {
        "title": "6. Code Quality Spot Check",
        "body": "Read 3-5 key files (entry points, API routes, main components):\n\nCheck for TODO/FIXME/HACK comments that should be resolved\nCheck for console.log/print statements left in production code\nCheck for proper error handling (try/catch, error boundaries)\nCheck for proper loading/error states in UI components\n\nReport specific file:line references for any issues found."
      },
      {
        "title": "7. Plan Completion Check",
        "body": "Read docs/plan/*/plan.md:\n\nCount completed tasks [x] vs total tasks\nFlag any [ ] or [~] tasks still remaining\nVerify all phase checkpoints have SHAs"
      },
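      {
        "title": "Task counting sketch",
        "body": "A quick completed-vs-open tally for the plan. A sketch, assuming the [x] / [ ] / [~] markers described above:\n\n# Sum task counts across all plan files\ncompleted=$(cat docs/plan/*/plan.md 2>/dev/null | grep -c -- '- \\[x\\]')\nremaining=$(cat docs/plan/*/plan.md 2>/dev/null | grep -cE -- '- \\[( |~)\\]')\necho \"completed: $completed, remaining: $remaining\""
      },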
      {
        "title": "8. Production Logs (if deployed)",
        "body": "If the project has been deployed (deploy URL in CLAUDE.md, or .solo/states/deploy exists if pipeline state directory is present), check production logs for runtime errors.\n\nRead the logs field from the stack YAML (templates/stacks/{stack}.yaml) to get platform-specific commands.\n\nVercel (Next.js):\n\nvercel logs --output=short 2>&1 | tail -50\n\nLook for: Error, FUNCTION_INVOCATION_FAILED, 504, unhandled rejections, hydration mismatches.\n\nCloudflare Workers:\n\nwrangler tail --format=pretty 2>&1 | head -50\n\nLook for: uncaught exceptions, D1 errors, R2 access failures.\n\nFly.io (Python API):\n\nfly logs --app {name} 2>&1 | tail -50\n\nLook for: ERROR, CRITICAL, OOM, connection refused, unhealthy instances.\n\nSupabase Edge Functions:\n\nsupabase functions logs --scroll 2>&1 | tail -30\n\niOS (TestFlight):\n\nCheck App Store Connect → TestFlight → Crashes\nIf local device: log stream --predicate 'subsystem == \"com.{org}.{name}\"'\n\nAndroid:\n\nadb logcat '*:E' --format=time 2>&1 | tail -30\n\nCheck Google Play Console → Android vitals → Crashes & ANRs\n\nIf no deploy yet: skip this dimension, note in report as \"N/A — not deployed\".\n\nIf logs show errors:\n\nClassify: startup crash vs runtime error vs intermittent\nAdd as FIX FIRST issues in the report\nInclude exact log lines as evidence\n\nReport:\n\nLog source checked (platform, command used)\nErrors found: count + severity\nError patterns (recurring vs one-off)\nStatus: CLEAN / WARN / ERRORS"
      },
      {
        "title": "9. Dev Principles Compliance",
        "body": "Check adherence to dev principles. Look for templates/principles/dev-principles.md (bundled with this skill), or check CLAUDE.md or project docs for architecture and coding conventions.\n\nRead the dev principles file, then spot-check 3-5 key source files for violations:\n\nSOLID:\n\nSRP — any god-class/god-module doing auth + profile + email + notifications? Flag bloated files (>300 LOC with mixed responsibilities).\nDIP — are services injected or hardcoded? Look for new ConcreteService() inside business logic instead of dependency injection.\n\nDRY vs Rule of Three:\n\nSearch for duplicated logic blocks (Grep for identical function signatures across files).\nBut don't flag 2-3 similar lines — duplication is OK until a pattern emerges.\n\nKISS:\n\nOver-engineered abstractions for one-time operations?\nFeature flags or backward-compat shims where a simple change would do?\nHelpers/utilities used only once?\n\nSchemas-First (SGR):\n\nAre Pydantic/Zod schemas defined before logic? Or is raw data passed around?\nAre API responses typed (not any / dict)?\nValidation at boundaries (user input, external APIs)?\n\nClean Architecture:\n\nDo dependencies point inward? Business logic should not import from UI/framework layer.\nIs business logic framework-independent?\n\nError Handling:\n\nFail-fast on invalid inputs? Or silent swallowing of errors?\nUser-facing errors are friendly? Internal errors have stack traces?\n\nReport:\n\nPrinciples followed: list key ones observed\nViolations found: with file:line references\nSeverity: MINOR (style) / MAJOR (architecture) / CRITICAL (data loss risk)"
      },
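      {
        "title": "DIP spot-check sketch",
        "body": "A quick grep for hardcoded service construction inside business logic. A sketch; the Service/Client/Repository naming pattern is an assumption about the codebase:\n\n# Direct instantiation that may signal a DIP violation\ngrep -rnE 'new [A-Z][A-Za-z]+(Service|Client|Repository)\\(' src/ app/ lib/ 2>/dev/null | head -10"
      },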
      {
        "title": "10. Commit Quality",
        "body": "Check git history for the current track/feature:\n\ngit log --oneline --since=\"1 week ago\" 2>&1 | head -30\n\nConventional commits format:\n\nEach commit follows <type>(<scope>): <description> pattern\nTypes: feat, fix, refactor, test, docs, chore, perf, style\nFlag: generic messages (\"fix\", \"update\", \"wip\", \"changes\"), missing type prefix, too-long titles (>72 chars)\n\nAtomicity:\n\nEach commit = one logical change? Or monster commits with 20 files across unrelated features?\nRevert-friendly? Could you git revert a single commit without side effects?\n\nSHAs in plan.md:\n\nCheck that completed tasks have <!-- sha:abc1234 --> comments\nCheck that phase checkpoints have <!-- checkpoint:abc1234 -->\n\ngrep -c \"sha:\" docs/plan/*/plan.md 2>/dev/null || echo \"No SHAs found\"\n\nPre-commit hooks:\n\nRead the stack YAML pre_commit field to know what system is expected (husky/pre-commit/lefthook) and what it should run (linter + formatter + type-checker). Then verify:\n\n# Detect what's configured\n[ -f .husky/pre-commit ] && echo \"husky\" || [ -f .pre-commit-config.yaml ] && echo \"pre-commit\" || [ -f lefthook.yml ] && echo \"lefthook\" || echo \"none\"\n\nHooks installed? Check config files exist AND hooks are wired (core.hooksPath for husky, .git/hooks/pre-commit for pre-commit/lefthook).\nHooks match stack? Compare detected system with stack YAML pre_commit field. Flag mismatch.\n--no-verify bypasses? Check if recent commits show signs of skipped hooks (e.g., lint violations that should've been caught). Flag as WARN.\nNot configured? Flag as WARN recommendation — stack YAML expects {pre_commit} but nothing found.\n\nReport:\n\nTotal commits: {N}\nConventional format: {N}/{M} compliant\nAtomic commits: YES / NO (with examples of violations)\nPlan SHAs: {N}/{M} tasks have SHAs\nPre-commit hooks: {ACTIVE / NOT INSTALLED / NOT CONFIGURED} (expected: {stack pre_commit})"
      },
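      {
        "title": "Conventional commit check sketch",
        "body": "A rough conformance count over recent commit subjects. A sketch, assuming the type list above:\n\n# Count subjects matching <type>(<scope>): <description>\ngit log --since=\"1 week ago\" --format=%s 2>/dev/null | grep -cE '^(feat|fix|refactor|test|docs|chore|perf|style)(\\([^)]+\\))?: '"
      },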
      {
        "title": "11. Documentation Freshness",
        "body": "Check that project documentation is up-to-date with the code.\n\nRequired files check:\n\nls -la CLAUDE.md README.md docs/prd.md docs/workflow.md 2>&1\n\nCLAUDE.md:\n\nDoes it reflect current tech stack, commands, directory structure?\nAre recently added features/endpoints documented?\nGrep for outdated references (old package names, removed files):\n# Check that files mentioned in CLAUDE.md actually exist\ngrep -oP '`[a-zA-Z0-9_./-]+\\.(ts|py|swift|kt|md)`' CLAUDE.md | while read f; do [ ! -f \"$f\" ] && echo \"MISSING: $f\"; done\n\nREADME.md:\n\nDoes it have setup/run/test/deploy instructions?\nAre the commands actually runnable?\n\ndocs/prd.md:\n\nDo features match what was actually built?\nAre metrics and success criteria defined?\n\nAICODE- comments:\n\ngrep -rn \"AICODE-TODO\" src/ app/ lib/ 2>/dev/null | head -10\ngrep -rn \"AICODE-ASK\" src/ app/ lib/ 2>/dev/null | head -10\n\nFlag unresolved AICODE-TODO items that were completed but not cleaned up\nFlag unanswered AICODE-ASK questions\nCheck for AICODE-NOTE on complex/non-obvious logic\n\nDead code check:\n\nUnused imports (linter should catch, but verify)\nOrphaned files not imported anywhere\nIf knip available (Next.js): pnpm knip 2>&1 | head -30\n\nReport:\n\nCLAUDE.md: CURRENT / STALE / MISSING\nREADME.md: CURRENT / STALE / MISSING\ndocs/prd.md: CURRENT / STALE / MISSING\ndocs/workflow.md: CURRENT / STALE / MISSING\nAICODE-TODO unresolved: {N}\nAICODE-ASK unanswered: {N}\nDead code: {files/exports found}"
      },
      {
        "title": "12. Visual/E2E Testing",
        "body": "If browser tools or device tools are available, run a visual smoke test.\n\nWeb projects (Playwright MCP or browser tools):\n\nStart dev server (use dev_server.command from stack YAML, e.g. pnpm dev)\nUse Playwright MCP tools (or browser-use skill) to navigate to the main page\nVerify it loads without console errors, hydration mismatches, or React errors\nNavigate to 2-3 key pages (based on spec.md features)\nTake screenshots at desktop (1280px) and mobile (375px) viewports\nLook for broken images, missing styles, layout overflow\n\niOS projects (simulator):\n\nBuild for simulator: xcodebuild -scheme {Name} -sdk iphonesimulator build\nInstall and launch on booted simulator\nTake screenshot of main screen\nCheck simulator logs for crashes or assertion failures\n\nAndroid projects (emulator):\n\nBuild debug APK: ./gradlew assembleDebug\nInstall and launch on emulator\nTake screenshot of main activity\nCheck logcat for crashes or ANRs: adb logcat '*:E' --format=time -d 2>&1 | tail -20\n\nIf tools are not available: skip this dimension, note as \"N/A — no browser/device tools\" in the report. Visual testing is never a blocker for SHIP verdict on its own.\n\nReport:\n\nPlatform tested: {browser / simulator / emulator / N/A}\nPages/screens checked: {N}\nConsole errors: {N}\nVisual issues: {NONE / list}\nResponsive: {PASS / issues found}\nStatus: {PASS / WARN / FAIL / N/A}"
      },
      {
        "title": "Review Report",
        "body": "Generate the final report:\n\nCode Review: {project-name}\nDate: {YYYY-MM-DD}\n\n## Verdict: {SHIP / FIX FIRST / BLOCK}\n\n### Summary\n{1-2 sentence overall assessment}\n\n### Tests\n- Total: {N} | Pass: {N} | Fail: {N} | Skip: {N}\n- Coverage: {N}%\n- Status: {PASS / FAIL}\n\n### Linter\n- Errors: {N} | Warnings: {N}\n- Status: {PASS / WARN / FAIL}\n\n### Build\n- Status: {PASS / FAIL}\n- Warnings: {N}\n\n### Security\n- Vulnerabilities: {N} (critical: {N}, high: {N}, moderate: {N})\n- Hardcoded secrets: {NONE / FOUND}\n- Status: {PASS / WARN / FAIL}\n\n### Acceptance Criteria\n- Verified: {N}/{M}\n- Missing: {list}\n- Status: {PASS / PARTIAL / FAIL}\n\n### Plan Progress\n- Tasks: {N}/{M} complete\n- Phases: {N}/{M} complete\n- Status: {COMPLETE / IN PROGRESS}\n\n### Production Logs\n- Platform: {Vercel / Cloudflare / Fly.io / N/A}\n- Errors: {N} | Warnings: {N}\n- Status: {CLEAN / WARN / ERRORS / N/A}\n\n### Dev Principles\n- SOLID: {PASS / violations found}\n- Schemas-first: {YES / raw data found}\n- Error handling: {PASS / issues found}\n- Status: {PASS / WARN / FAIL}\n\n### Commits\n- Total: {N} | Conventional: {N}/{M}\n- Atomic: {YES / NO}\n- Plan SHAs: {N}/{M}\n- Status: {PASS / WARN / FAIL}\n\n### Documentation\n- CLAUDE.md: {CURRENT / STALE / MISSING}\n- README.md: {CURRENT / STALE / MISSING}\n- AICODE-TODO unresolved: {N}\n- Dead code: {NONE / found}\n- Status: {PASS / WARN / FAIL}\n\n### Visual Testing\n- Platform: {browser / simulator / emulator / N/A}\n- Pages/screens: {N}\n- Console errors: {N}\n- Visual issues: {NONE / list}\n- Status: {PASS / WARN / FAIL / N/A}\n\n### Issues Found\n1. [{severity}] {description} — {file:line}\n2. [{severity}] {description} — {file:line}\n\n### Recommendations\n- {actionable recommendation}\n- {actionable recommendation}\n\nVerdict logic:\n\nSHIP: All tests pass, no security issues, acceptance criteria met, build succeeds, production logs clean, docs current, commits atomic, no critical visual issues\nFIX FIRST: Minor issues (warnings, partial criteria, low-severity vulns, intermittent log errors, stale docs, non-conventional commits, minor SOLID violations, minor visual issues like layout overflow) — list what to fix\nBLOCK: Failing tests, security vulnerabilities, missing critical features, production crashes in logs, missing CLAUDE.md/README.md, critical architecture violations, app crashes on launch (simulator/emulator) — do not ship"
      },
      {
        "title": "Post-Verdict: CLAUDE.md Revision",
        "body": "After the verdict report, revise the project's CLAUDE.md to keep it lean and useful for future agents."
      },
      {
        "title": "Steps:",
        "body": "Read CLAUDE.md and check size: wc -c CLAUDE.md\nAdd learnings from this review:\n\nNew Do/Don't rules discovered during review\nUpdated commands, workflows, or architecture decisions\nFixed issues or gotchas worth remembering\nStack/dependency changes (new packages, removed deps)\n\n\nIf over 40,000 characters — trim ruthlessly:\n\nCollapse completed phase/milestone histories into one line each\nRemove verbose explanations — keep terse, actionable notes\nRemove duplicate info (same thing explained in multiple sections)\nRemove historical migration notes, old debugging context\nRemove examples that are obvious from code or covered by skill/doc files\nRemove outdated troubleshooting for resolved issues\n\n\nVerify result ≤ 40,000 characters — if still over, cut least actionable content\nWrite updated CLAUDE.md, update \"Last updated\" date"
      },
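      {
        "title": "Size gate sketch",
        "body": "A small guard for the 40,000-character budget; a sketch of the check only, the trimming itself stays manual:\n\nsize=$(wc -c < CLAUDE.md)\nif [ \"$size\" -gt 40000 ]; then\n  echo \"CLAUDE.md is $size chars; trim before committing\"\nelse\n  echo \"CLAUDE.md is $size chars; within budget\"\nfi"
      },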
      {
        "title": "Priority (keep → cut):",
        "body": "ALWAYS KEEP: Tech stack, directory structure, Do/Don't rules, common commands, architecture decisions\nKEEP: Workflow instructions, troubleshooting for active issues, key file references\nCONDENSE: Phase histories (one line each), detailed examples, tool/MCP listings\nCUT FIRST: Historical notes, verbose explanations, duplicated content, resolved issues"
      },
      {
        "title": "Rules:",
        "body": "Never remove Do/Don't sections — critical guardrails\nPreserve overall section structure and ordering\nEvery line must earn its place: \"would a future agent need this to do their job?\"\nCommit the update: git add CLAUDE.md && git commit -m \"docs: revise CLAUDE.md (post-review)\""
      },
      {
        "title": "AFTER CLAUDE.md revision — output signal EXACTLY ONCE:",
        "body": "Output pipeline signal ONLY if pipeline state directory (.solo/states/) exists.\n\nOutput the signal tag ONCE and ONLY ONCE. Do not repeat it. The pipeline detects the first occurrence.\n\nIf SHIP: output this exact line (once):\n\n<solo:done/>\n\nIf FIX FIRST or BLOCK:\n\nOpen plan.md and APPEND a new phase with fix tasks (one - [ ] Task per issue found)\nChange plan.md status from [x] Complete to [~] In Progress\nCommit: git add docs/plan/ && git commit -m \"fix: add review fix tasks\"\nOutput this exact line (once):\n\n<solo:redo/>\n\nThe pipeline reads these tags and handles all marker files automatically. You do NOT need to create or delete any marker files yourself.\nOutput the signal tag once — the pipeline detects the first occurrence."
      },
      {
        "title": "Tests won't run",
        "body": "Cause: Missing dependencies or test config.\nFix: Run npm install / uv sync, check test config exists (jest.config, pytest.ini)."
      },
      {
        "title": "Linter not configured",
        "body": "Cause: No linter config file found.\nFix: Note as a recommendation in the report, not a blocker."
      },
      {
        "title": "Build fails",
        "body": "Cause: Type errors, import issues, missing env vars.\nFix: Report specific errors. This is a BLOCK verdict — must fix before shipping."
      },
      {
        "title": "Two-Stage Review Pattern",
        "body": "When reviewing significant work, use two stages:\n\nStage 1 — Spec Compliance:\n\nDoes the implementation match spec.md requirements?\nAre all acceptance criteria actually met (not just claimed)?\nAny deviations from the plan? If so, are they justified improvements or problems?\n\nStage 2 — Code Quality:\n\nArchitecture patterns, error handling, type safety\nTest coverage and test quality\nSecurity and performance\nCode organization and maintainability"
      },
      {
        "title": "Verification Gate",
        "body": "No verdict without fresh evidence.\n\nBefore writing any verdict (SHIP/FIX/BLOCK):\n\nRun the actual test/build/lint commands (not cached results).\nRead full output — exit codes, pass/fail counts, error messages.\nConfirm the output matches your claim.\nOnly then write the verdict with evidence.\n\nNever write \"tests should pass\" — run them and show the output."
      },
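      {
        "title": "Evidence capture sketch",
        "body": "One way to gather fresh evidence with exit codes before writing a verdict. A sketch; the command list is a placeholder for the stack's real gate commands:\n\n# Run each gate command and record output plus exit code\nfor cmd in \"make test\" \"make lint\" \"make build\"; do\n  out=$($cmd 2>&1); code=$?\n  echo \"$cmd -> exit $code\"\n  echo \"$out\" | tail -5\ndone"
      },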
      {
        "title": "Rationalizations Catalog",
        "body": "ThoughtReality\"Tests were passing earlier\"Run them NOW. Code changed since then.\"It's just a warning\"Warnings become bugs. Report them.\"The build worked locally\"Check the platform too. Environment differences matter.\"Security scan is overkill\"One missed secret = data breach. Always scan.\"Good enough to ship\"Quantify \"good enough\". Show the numbers.\"I already checked this\"Fresh evidence only. Stale checks are worthless."
      },
      {
        "title": "Critical Rules",
        "body": "Run all checks — do not skip dimensions even if project seems simple.\nBe specific — always include file:line references for issues.\nVerdict must be justified — every SHIP/FIX/BLOCK needs evidence from actual commands.\nDon't auto-fix code — report issues and add fix tasks to plan.md. Let /build fix them. Review only modifies plan.md, never source code.\nCheck acceptance criteria — spec.md is the source of truth for \"done\".\nSecurity is non-negotiable — any hardcoded secret = BLOCK.\nFresh evidence only — run commands before making claims. Never rely on memory."
      }
    ],
    "body": "/review\n\nThis skill is self-contained — follow the instructions below instead of delegating to external review skills (superpowers, etc.) or spawning Task subagents. Run all checks directly.\n\nFinal quality gate before shipping. Runs tests, checks security, verifies acceptance criteria from spec.md, audits code quality, and generates a ship-ready report with go/no-go verdict.\n\nWhen to use\n\nAfter /deploy (or /build if deploying manually). This is the quality gate.\n\nPipeline: /deploy → /review\n\nCan also be used standalone: /review on any project to audit code quality.\n\nMCP Tools (use if available)\nsession_search(query) — find past review patterns and common issues\nproject_code_search(query, project) — find similar code patterns across projects\ncodegraph_query(query) — check dependencies, imports, unused code\n\nIf MCP tools are not available, fall back to Glob + Grep + Read.\n\nPre-flight Checks\n1. Architecture overview (if MCP available)\ncodegraph_explain(project=\"{project name}\")\n\n\nReturns: stack, languages, directory layers, key patterns, top dependencies, hub files. Use this to detect stack and understand project structure.\n\n2. Essential docs (parallel reads)\nCLAUDE.md — architecture, Do/Don't rules\ndocs/plan/*/spec.md — acceptance criteria to verify (REQUIRED)\ndocs/plan/*/plan.md — task completion status (REQUIRED)\ndocs/workflow.md — TDD policy, quality standards, integration testing commands (if exists)\n\nDo NOT read source code at this stage. Only docs.\n\n3. Detect stack\n\nUse stack from codegraph_explain response (or CLAUDE.md if no MCP) to choose tools:\n\nNext.js → npm run build, npm test, npx next lint\nPython → uv run pytest, uv run ruff check\nSwift → swift test, swiftlint\nKotlin → ./gradlew test, ./gradlew lint\n4. Smart source code loading (for code quality spot check)\n\nDo NOT read random source files. Use the graph to find the most important code:\n\ncodegraph_query(\"MATCH (f:File {project: '{name}'})-[e]-() RETURN f.path, COUNT(e) AS edges ORDER BY edges DESC LIMIT 5\")\n\n\nRead only the top 3-5 hub files (most connected = most impactful). For security checks, use Grep with narrow patterns (sk_live, password\\s*=) — not full file reads.\n\nReview Dimensions\n\nMakefile convention: If Makefile exists in project root, always prefer make targets over raw commands. Use make test instead of npm test, make lint instead of pnpm lint, make build instead of pnpm build. Run make help (or read Makefile) to discover available targets including integration tests.\n\nRun all 12 dimensions in sequence. Report findings per dimension.\n\n1. Test Suite\n\nRun the full test suite (prefer make test if Makefile exists):\n\n# If Makefile exists — use it\nmake test 2>&1 || true\n\n# Fallback: Next.js / Node\nnpm test -- --coverage 2>&1 || true\n\n# Python\nuv run pytest --tb=short -q 2>&1 || true\n\n# Swift\nswift test 2>&1 || true\n\n\nReport:\n\nTotal tests: pass / fail / skip\nCoverage percentage (if available)\nAny failing tests with file:line references\n\nIntegration tests — if docs/workflow.md has an \"Integration Testing\" section, run the specified commands:\n\nExecute the CLI/integration commands listed there\nVerify exit code 0 and expected output format\nReport: command run, exit code, pass/fail\n2. Linter & Type Check\n# Next.js\npnpm lint 2>&1 || true\npnpm tsc --noEmit 2>&1 || true\n\n# Python\nuv run ruff check . 2>&1 || true\nuv run ty check . 
2>&1 || true\n\n# Swift\nswiftlint lint --strict 2>&1 || true\n\n# Kotlin\n./gradlew detekt 2>&1 || true\n./gradlew ktlintCheck 2>&1 || true\n\n\nReport: warnings count, errors count, top issues.\n\n3. Build Verification\n# Next.js\nnpm run build 2>&1 || true\n\n# Python\nuv run python -m py_compile src/**/*.py 2>&1 || true\n\n# Astro\nnpm run build 2>&1 || true\n\n\nReport: build success/failure, any warnings.\n\n4. Security Audit\n\nDependency vulnerabilities:\n\n# Node\nnpm audit --audit-level=moderate 2>&1 || true\n\n# Python\nuv run pip-audit 2>&1 || true\n\n\nCode-level checks (Grep for common issues):\n\nHardcoded secrets: grep -rn \"sk_live\\|sk_test\\|password\\s*=\\s*['\\\"]\" src/ app/ lib/\nSQL injection: look for string concatenation in queries\nXSS: look for dangerouslySetInnerHTML without sanitization\nExposed env vars: check .gitignore includes .env*\n\nReport: vulnerabilities found, severity levels.\n\n5. Acceptance Criteria Verification\n\nRead docs/plan/*/spec.md and check each acceptance criterion:\n\nFor each - [ ] criterion in spec.md:\n\nSearch codebase for evidence it was implemented.\nCheck if related tests exist.\nMark as verified or flag as missing.\n\nUpdate spec.md checkboxes. After verifying each criterion, use Edit tool to change - [ ] to - [x] in spec.md. Leaving verified criteria unchecked causes staleness across pipeline runs — check them off as you go.\n\nAcceptance Criteria:\n  - [x] User can sign up with email — found in app/auth/signup/page.tsx + test\n  - [x] Dashboard shows project list — found in app/dashboard/page.tsx\n  - [ ] Stripe checkout works — route exists but no test coverage\n\n\nAfter updating checkboxes, commit: git add docs/plan/*/spec.md && git commit -m \"docs: update spec checkboxes (verified by review)\"\n\n6. Code Quality Spot Check\n\nRead 3-5 key files (entry points, API routes, main components):\n\nCheck for TODO/FIXME/HACK comments that should be resolved\nCheck for console.log/print statements left in production code\nCheck for proper error handling (try/catch, error boundaries)\nCheck for proper loading/error states in UI components\n\nReport specific file:line references for any issues found.\n\n7. Plan Completion Check\n\nRead docs/plan/*/plan.md:\n\nCount completed tasks [x] vs total tasks\nFlag any [ ] or [~] tasks still remaining\nVerify all phase checkpoints have SHAs\n8. 
Production Logs (if deployed)\n\nIf the project has been deployed (deploy URL in CLAUDE.md, or .solo/states/deploy exists if pipeline state directory is present), check production logs for runtime errors.\n\nRead the logs field from the stack YAML (templates/stacks/{stack}.yaml) to get platform-specific commands.\n\nVercel (Next.js):\n\nvercel logs --output=short 2>&1 | tail -50\n\n\nLook for: Error, FUNCTION_INVOCATION_FAILED, 504, unhandled rejections, hydration mismatches.\n\nCloudflare Workers:\n\nwrangler tail --format=pretty 2>&1 | head -50\n\n\nLook for: uncaught exceptions, D1 errors, R2 access failures.\n\nFly.io (Python API):\n\nfly logs --app {name} 2>&1 | tail -50\n\n\nLook for: ERROR, CRITICAL, OOM, connection refused, unhealthy instances.\n\nSupabase Edge Functions:\n\nsupabase functions logs --scroll 2>&1 | tail -30\n\n\niOS (TestFlight):\n\nCheck App Store Connect → TestFlight → Crashes\nIf local device: log stream --predicate 'subsystem == \"com.{org}.{name}\"'\n\nAndroid:\n\nadb logcat '*:E' --format=time 2>&1 | tail -30\n\nCheck Google Play Console → Android vitals → Crashes & ANRs\n\nIf no deploy yet: skip this dimension, note in report as \"N/A — not deployed\".\n\nIf logs show errors:\n\nClassify: startup crash vs runtime error vs intermittent\nAdd as FIX FIRST issues in the report\nInclude exact log lines as evidence\n\nReport:\n\nLog source checked (platform, command used)\nErrors found: count + severity\nError patterns (recurring vs one-off)\nStatus: CLEAN / WARN / ERRORS\n9. Dev Principles Compliance\n\nCheck adherence to dev principles. Look for templates/principles/dev-principles.md (bundled with this skill), or check CLAUDE.md or project docs for architecture and coding conventions.\n\nRead the dev principles file, then spot-check 3-5 key source files for violations:\n\nSOLID:\n\nSRP — any god-class/god-module doing auth + profile + email + notifications? Flag bloated files (>300 LOC with mixed responsibilities).\nDIP — are services injected or hardcoded? Look for new ConcreteService() inside business logic instead of dependency injection.\n\nDRY vs Rule of Three:\n\nSearch for duplicated logic blocks (Grep for identical function signatures across files).\nBut don't flag 2-3 similar lines — duplication is OK until a pattern emerges.\n\nKISS:\n\nOver-engineered abstractions for one-time operations?\nFeature flags or backward-compat shims where a simple change would do?\nHelpers/utilities used only once?\n\nSchemas-First (SGR):\n\nAre Pydantic/Zod schemas defined before logic? Or is raw data passed around?\nAre API responses typed (not any / dict)?\nValidation at boundaries (user input, external APIs)?\n\nClean Architecture:\n\nDo dependencies point inward? Business logic should not import from UI/framework layer.\nIs business logic framework-independent?\n\nError Handling:\n\nFail-fast on invalid inputs? Or silent swallowing of errors?\nUser-facing errors are friendly? Internal errors have stack traces?\n\nReport:\n\nPrinciples followed: list key ones observed\nViolations found: with file:line references\nSeverity: MINOR (style) / MAJOR (architecture) / CRITICAL (data loss risk)\n10. 
Commit Quality\n\nCheck git history for the current track/feature:\n\ngit log --oneline --since=\"1 week ago\" 2>&1 | head -30\n\n\nConventional commits format:\n\nEach commit follows <type>(<scope>): <description> pattern\nTypes: feat, fix, refactor, test, docs, chore, perf, style\nFlag: generic messages (\"fix\", \"update\", \"wip\", \"changes\"), missing type prefix, too-long titles (>72 chars)\n\nAtomicity:\n\nEach commit = one logical change? Or monster commits with 20 files across unrelated features?\nRevert-friendly? Could you git revert a single commit without side effects?\n\nSHAs in plan.md:\n\nCheck that completed tasks have <!-- sha:abc1234 --> comments\nCheck that phase checkpoints have <!-- checkpoint:abc1234 -->\ngrep -c \"sha:\" docs/plan/*/plan.md 2>/dev/null || echo \"No SHAs found\"\n\n\nPre-commit hooks:\n\nRead the stack YAML pre_commit field to know what system is expected (husky/pre-commit/lefthook) and what it should run (linter + formatter + type-checker). Then verify:\n\n# Detect what's configured\n[ -f .husky/pre-commit ] && echo \"husky\" || [ -f .pre-commit-config.yaml ] && echo \"pre-commit\" || [ -f lefthook.yml ] && echo \"lefthook\" || echo \"none\"\n\nHooks installed? Check config files exist AND hooks are wired (core.hooksPath for husky, .git/hooks/pre-commit for pre-commit/lefthook).\nHooks match stack? Compare detected system with stack YAML pre_commit field. Flag mismatch.\n--no-verify bypasses? Check if recent commits show signs of skipped hooks (e.g., lint violations that should've been caught). Flag as WARN.\nNot configured? Flag as WARN recommendation — stack YAML expects {pre_commit} but nothing found.\n\nReport:\n\nTotal commits: {N}\nConventional format: {N}/{M} compliant\nAtomic commits: YES / NO (with examples of violations)\nPlan SHAs: {N}/{M} tasks have SHAs\nPre-commit hooks: {ACTIVE / NOT INSTALLED / NOT CONFIGURED} (expected: {stack pre_commit})\n11. Documentation Freshness\n\nCheck that project documentation is up-to-date with the code.\n\nRequired files check:\n\nls -la CLAUDE.md README.md docs/prd.md docs/workflow.md 2>&1\n\n\nCLAUDE.md:\n\nDoes it reflect current tech stack, commands, directory structure?\nAre recently added features/endpoints documented?\nGrep for outdated references (old package names, removed files):\n# Check that files mentioned in CLAUDE.md actually exist\ngrep -oP '`[a-zA-Z0-9_./-]+\\.(ts|py|swift|kt|md)`' CLAUDE.md | while read f; do [ ! -f \"$f\" ] && echo \"MISSING: $f\"; done\n\n\nREADME.md:\n\nDoes it have setup/run/test/deploy instructions?\nAre the commands actually runnable?\n\ndocs/prd.md:\n\nDo features match what was actually built?\nAre metrics and success criteria defined?\n\nAICODE- comments:\n\ngrep -rn \"AICODE-TODO\" src/ app/ lib/ 2>/dev/null | head -10\ngrep -rn \"AICODE-ASK\" src/ app/ lib/ 2>/dev/null | head -10\n\nFlag unresolved AICODE-TODO items that were completed but not cleaned up\nFlag unanswered AICODE-ASK questions\nCheck for AICODE-NOTE on complex/non-obvious logic\n\nDead code check:\n\nUnused imports (linter should catch, but verify)\nOrphaned files not imported anywhere\nIf knip available (Next.js): pnpm knip 2>&1 | head -30\n\nReport:\n\nCLAUDE.md: CURRENT / STALE / MISSING\nREADME.md: CURRENT / STALE / MISSING\ndocs/prd.md: CURRENT / STALE / MISSING\ndocs/workflow.md: CURRENT / STALE / MISSING\nAICODE-TODO unresolved: {N}\nAICODE-ASK unanswered: {N}\nDead code: {files/exports found}\n12. 
Visual/E2E Testing\n\nIf browser tools or device tools are available, run a visual smoke test.\n\nWeb projects (Playwright MCP or browser tools):\n\nStart dev server (use dev_server.command from stack YAML, e.g. pnpm dev)\nUse Playwright MCP tools (or browser-use skill) to navigate to the main page\nVerify it loads without console errors, hydration mismatches, or React errors\nNavigate to 2-3 key pages (based on spec.md features)\nTake screenshots at desktop (1280px) and mobile (375px) viewports\nLook for broken images, missing styles, layout overflow\n\niOS projects (simulator):\n\nBuild for simulator: xcodebuild -scheme {Name} -sdk iphonesimulator build\nInstall and launch on booted simulator\nTake screenshot of main screen\nCheck simulator logs for crashes or assertion failures\n\nAndroid projects (emulator):\n\nBuild debug APK: ./gradlew assembleDebug\nInstall and launch on emulator\nTake screenshot of main activity\nCheck logcat for crashes or ANRs: adb logcat '*:E' --format=time -d 2>&1 | tail -20\n\nIf tools are not available: skip this dimension, note as \"N/A — no browser/device tools\" in the report. Visual testing is never a blocker for SHIP verdict on its own.\n\nReport:\n\nPlatform tested: {browser / simulator / emulator / N/A}\nPages/screens checked: {N}\nConsole errors: {N}\nVisual issues: {NONE / list}\nResponsive: {PASS / issues found}\nStatus: {PASS / WARN / FAIL / N/A}\nReview Report\n\nGenerate the final report:\n\nCode Review: {project-name}\nDate: {YYYY-MM-DD}\n\n## Verdict: {SHIP / FIX FIRST / BLOCK}\n\n### Summary\n{1-2 sentence overall assessment}\n\n### Tests\n- Total: {N} | Pass: {N} | Fail: {N} | Skip: {N}\n- Coverage: {N}%\n- Status: {PASS / FAIL}\n\n### Linter\n- Errors: {N} | Warnings: {N}\n- Status: {PASS / WARN / FAIL}\n\n### Build\n- Status: {PASS / FAIL}\n- Warnings: {N}\n\n### Security\n- Vulnerabilities: {N} (critical: {N}, high: {N}, moderate: {N})\n- Hardcoded secrets: {NONE / FOUND}\n- Status: {PASS / WARN / FAIL}\n\n### Acceptance Criteria\n- Verified: {N}/{M}\n- Missing: {list}\n- Status: {PASS / PARTIAL / FAIL}\n\n### Plan Progress\n- Tasks: {N}/{M} complete\n- Phases: {N}/{M} complete\n- Status: {COMPLETE / IN PROGRESS}\n\n### Production Logs\n- Platform: {Vercel / Cloudflare / Fly.io / N/A}\n- Errors: {N} | Warnings: {N}\n- Status: {CLEAN / WARN / ERRORS / N/A}\n\n### Dev Principles\n- SOLID: {PASS / violations found}\n- Schemas-first: {YES / raw data found}\n- Error handling: {PASS / issues found}\n- Status: {PASS / WARN / FAIL}\n\n### Commits\n- Total: {N} | Conventional: {N}/{M}\n- Atomic: {YES / NO}\n- Plan SHAs: {N}/{M}\n- Status: {PASS / WARN / FAIL}\n\n### Documentation\n- CLAUDE.md: {CURRENT / STALE / MISSING}\n- README.md: {CURRENT / STALE / MISSING}\n- AICODE-TODO unresolved: {N}\n- Dead code: {NONE / found}\n- Status: {PASS / WARN / FAIL}\n\n### Visual Testing\n- Platform: {browser / simulator / emulator / N/A}\n- Pages/screens: {N}\n- Console errors: {N}\n- Visual issues: {NONE / list}\n- Status: {PASS / WARN / FAIL / N/A}\n\n### Issues Found\n1. [{severity}] {description} — {file:line}\n2. 
[{severity}] {description} — {file:line}\n\n### Recommendations\n- {actionable recommendation}\n- {actionable recommendation}\n\n\nVerdict logic:\n\nSHIP: All tests pass, no security issues, acceptance criteria met, build succeeds, production logs clean, docs current, commits atomic, no critical visual issues\nFIX FIRST: Minor issues (warnings, partial criteria, low-severity vulns, intermittent log errors, stale docs, non-conventional commits, minor SOLID violations, minor visual issues like layout overflow) — list what to fix\nBLOCK: Failing tests, security vulnerabilities, missing critical features, production crashes in logs, missing CLAUDE.md/README.md, critical architecture violations, app crashes on launch (simulator/emulator) — do not ship\nPost-Verdict: CLAUDE.md Revision\n\nAfter the verdict report, revise the project's CLAUDE.md to keep it lean and useful for future agents.\n\nSteps:\nRead CLAUDE.md and check size: wc -c CLAUDE.md\nAdd learnings from this review:\nNew Do/Don't rules discovered during review\nUpdated commands, workflows, or architecture decisions\nFixed issues or gotchas worth remembering\nStack/dependency changes (new packages, removed deps)\nIf over 40,000 characters — trim ruthlessly:\nCollapse completed phase/milestone histories into one line each\nRemove verbose explanations — keep terse, actionable notes\nRemove duplicate info (same thing explained in multiple sections)\nRemove historical migration notes, old debugging context\nRemove examples that are obvious from code or covered by skill/doc files\nRemove outdated troubleshooting for resolved issues\nVerify result ≤ 40,000 characters — if still over, cut least actionable content\nWrite updated CLAUDE.md, update \"Last updated\" date\nPriority (keep → cut):\nALWAYS KEEP: Tech stack, directory structure, Do/Don't rules, common commands, architecture decisions\nKEEP: Workflow instructions, troubleshooting for active issues, key file references\nCONDENSE: Phase histories (one line each), detailed examples, tool/MCP listings\nCUT FIRST: Historical notes, verbose explanations, duplicated content, resolved issues\nRules:\nNever remove Do/Don't sections — critical guardrails\nPreserve overall section structure and ordering\nEvery line must earn its place: \"would a future agent need this to do their job?\"\nCommit the update: git add CLAUDE.md && git commit -m \"docs: revise CLAUDE.md (post-review)\"\nAFTER CLAUDE.md revision — output signal EXACTLY ONCE:\n\nOutput pipeline signal ONLY if pipeline state directory (.solo/states/) exists.\n\nOutput the signal tag ONCE and ONLY ONCE. Do not repeat it. The pipeline detects the first occurrence.\n\nIf SHIP: output this exact line (once):\n\n<solo:done/>\n\n\nIf FIX FIRST or BLOCK:\n\nOpen plan.md and APPEND a new phase with fix tasks (one - [ ] Task per issue found)\nChange plan.md status from [x] Complete to [~] In Progress\nCommit: git add docs/plan/ && git commit -m \"fix: add review fix tasks\"\nOutput this exact line (once):\n<solo:redo/>\n\n\nThe pipeline reads these tags and handles all marker files automatically. You do NOT need to create or delete any marker files yourself. Output the signal tag once — the pipeline detects the first occurrence.\n\nError Handling\nTests won't run\n\nCause: Missing dependencies or test config. Fix: Run npm install / uv sync, check test config exists (jest.config, pytest.ini).\n\nLinter not configured\n\nCause: No linter config file found. 
Fix: Note as a recommendation in the report, not a blocker.\n\nBuild fails\n\nCause: Type errors, import issues, missing env vars. Fix: Report specific errors. This is a BLOCK verdict — must fix before shipping.\n\nTwo-Stage Review Pattern\n\nWhen reviewing significant work, use two stages:\n\nStage 1 — Spec Compliance:\n\nDoes the implementation match spec.md requirements?\nAre all acceptance criteria actually met (not just claimed)?\nAny deviations from the plan? If so, are they justified improvements or problems?\n\nStage 2 — Code Quality:\n\nArchitecture patterns, error handling, type safety\nTest coverage and test quality\nSecurity and performance\nCode organization and maintainability\nVerification Gate\n\nNo verdict without fresh evidence.\n\nBefore writing any verdict (SHIP/FIX/BLOCK):\n\nRun the actual test/build/lint commands (not cached results).\nRead full output — exit codes, pass/fail counts, error messages.\nConfirm the output matches your claim.\nOnly then write the verdict with evidence.\n\nNever write \"tests should pass\" — run them and show the output.\n\nRationalizations Catalog\nThought\tReality\n\"Tests were passing earlier\"\tRun them NOW. Code changed since then.\n\"It's just a warning\"\tWarnings become bugs. Report them.\n\"The build worked locally\"\tCheck the platform too. Environment differences matter.\n\"Security scan is overkill\"\tOne missed secret = data breach. Always scan.\n\"Good enough to ship\"\tQuantify \"good enough\". Show the numbers.\n\"I already checked this\"\tFresh evidence only. Stale checks are worthless.\nCritical Rules\nRun all checks — do not skip dimensions even if project seems simple.\nBe specific — always include file:line references for issues.\nVerdict must be justified — every SHIP/FIX/BLOCK needs evidence from actual commands.\nDon't auto-fix code — report issues and add fix tasks to plan.md. Let /build fix them. Review only modifies plan.md, never source code.\nCheck acceptance criteria — spec.md is the source of truth for \"done\".\nSecurity is non-negotiable — any hardcoded secret = BLOCK.\nFresh evidence only — run commands before making claims. Never rely on memory."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/fortunto2/solo-review",
    "publisherUrl": "https://clawhub.ai/fortunto2/solo-review",
    "owner": "fortunto2",
    "version": "1.1.1",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/solo-review",
    "downloadUrl": "https://openagent3.xyz/downloads/solo-review",
    "agentUrl": "https://openagent3.xyz/skills/solo-review/agent",
    "manifestUrl": "https://openagent3.xyz/skills/solo-review/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/solo-review/agent.md"
  }
}