{
  "schemaVersion": "1.0",
  "item": {
    "slug": "testing-workflow",
    "name": "Testing Workflow",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/wpank/testing-workflow",
    "canonicalUrl": "https://clawhub.ai/wpank/testing-workflow",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/testing-workflow",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=testing-workflow",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "testing-workflow",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-09T22:39:27.823Z",
      "expiresAt": "2026-05-16T22:39:27.823Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=testing-workflow",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=testing-workflow",
        "contentDisposition": "attachment; filename=\"testing-workflow-0.1.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "testing-workflow"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/testing-workflow"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/testing-workflow",
    "agentPageUrl": "https://openagent3.xyz/skills/testing-workflow/agent",
    "manifestUrl": "https://openagent3.xyz/skills/testing-workflow/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/testing-workflow/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Testing Workflow",
        "body": "Orchestrate comprehensive testing across a project by coordinating the testing-patterns skill, e2e-testing skill, and testing agents. This meta-skill does not define test patterns itself — it routes to the right skill or agent at each stage and ensures nothing is missed."
      },
      {
        "title": "When to Use",
        "body": "Setting up testing for a new project from scratch\nImproving coverage for an existing project with gaps\nEstablishing or revising a testing strategy\nBefore a major release to verify quality gates are met\nAfter a large refactor to confirm nothing broke\nDuring code review when test adequacy is in question\nOnboarding a team to a testing workflow"
      },
      {
        "title": "Orchestration Flow",
        "body": "Follow these steps in order. Each step routes to a specific skill or agent — read and apply that resource before moving to the next step."
      },
      {
        "title": "Phase 1: Discovery and Baseline",
        "body": "Scan the project to understand existing test infrastructure, measure current coverage, and identify gaps before making changes. Without a baseline, you cannot demonstrate improvement.\n\nIdentify test infrastructure — Determine the test runner, assertion library, coverage tool, and CI configuration already in use. If none exist, flag that setup is needed.\nMeasure current coverage — Run the existing test suite and record statement, branch, and function coverage. This is the baseline.\nMap untested code — Identify modules, functions, and code paths with no test coverage. Prioritize by risk: business-critical logic first, utilities last.\nCatalog existing tests — Categorize existing tests as unit, integration, or E2E. Check for skipped tests, flaky tests, and tests that don't assert anything meaningful."
      },
      {
        "title": "Phase 2: Strategy Selection",
        "body": "Based on the discovery results, select the appropriate testing approach for this project.\n\nDetermine project type — Use the Coverage Targets table below to set appropriate thresholds for the project type.\nSelect test patterns — Read ai/skills/testing/testing-patterns/SKILL.md and choose the unit/integration test patterns that match the project's architecture, language, and framework.\nIdentify critical user journeys — List the 3-10 most important user workflows that require E2E coverage. These are flows where a failure would directly impact revenue, user trust, or safety.\nDocument the strategy — Fill in the Testing Strategy Template (below) and commit it to the repository."
      },
      {
        "title": "Phase 3: Implementation",
        "body": "Generate tests following the patterns selected in Phase 2.\n\nUnit tests first — Write unit tests for uncovered business logic, starting with the highest-risk modules. Follow the testing pyramid: ~70% of your tests should be unit tests.\nIntegration tests next — Write integration tests for module boundaries, API endpoints, and database queries. Focus on the seams where components interact.\nE2E tests for critical journeys — Read ai/skills/testing/e2e-testing/SKILL.md and write E2E tests for each critical user journey identified in Phase 2.\nEdge case coverage — After the happy paths are covered, add tests for error conditions, boundary values, null/empty inputs, and concurrency scenarios."
      },
      {
        "title": "Phase 4: Validation",
        "body": "Verify that the new tests meet quality standards and coverage targets.\n\nRun the full test suite — Every test must pass. Fix failures before proceeding.\nMeasure coverage against targets — Compare new coverage against the thresholds for the project type. If targets are not met, return to Phase 3.\nCheck test quality — Review tests for the anti-patterns listed in testing-patterns (assert-free tests, overmocking, flaky tests, test pollution). Fix any found.\nVerify CI integration — Confirm that tests run automatically on every push/PR and that coverage thresholds are enforced in CI."
      },
      {
        "title": "Phase 5: Maintenance",
        "body": "Establish ongoing practices to keep the test suite healthy.\n\nSet up coverage ratcheting — Configure CI to fail if coverage drops below the current level. Coverage should only go up.\nEstablish flaky test policy — Any test that fails intermittently must be fixed within one sprint or removed with a justification.\nDefine test review standards — Every PR that adds or changes logic must include corresponding test changes. Reviewers check for this.\nSchedule test health audits — Quarterly, review test execution time, flaky test rate, skipped test count, and coverage trends."
      },
      {
        "title": "Skill Routing Table",
        "body": "Use this table to route specific needs to the correct resource:\n\nNeed\tRoute To\tPath\nUnit/integration test patterns\ttesting-patterns\tai/skills/testing/testing-patterns/SKILL.md\nE2E test patterns\te2e-testing\tai/skills/testing/e2e-testing/SKILL.md\nCode quality standards\tclean-code\tai/skills/testing/clean-code/SKILL.md\nReview checklist\tcode-review\tai/skills/testing/code-review/SKILL.md\nCI/CD quality gates\tquality-gates\tai/skills/testing/quality-gates/SKILL.md\nDebugging test failures\tdebugging\tai/skills/testing/debugging/SKILL.md\n\nWhen a request falls clearly into one row, go directly to that resource. Use the full orchestration flow only when comprehensive coverage is the goal."
      },
      {
        "title": "Coverage Targets",
        "body": "Targets vary by project type. Use the appropriate row to set expectations:\n\nProject Type\tStatement\tBranch\tFunction\tE2E Journeys\tNotes\nStartup MVP\t60%\t50%\t60%\tTop 3 flows\tFocus on critical paths only\nProduction App\t80%\t70%\t80%\tTop 10 flows\tBalance speed with confidence\nLibrary / Package\t90%\t85%\t95%\tN/A\tPublic API must be fully covered\nCritical Infrastructure\t95%\t90%\t95%\tAll flows\tZero tolerance for gaps\n\nThese are minimums. Aim higher when time permits, but do not block releases on vanity metrics — prioritize meaningful coverage over percentage points."
      },
      {
        "title": "Testing Strategy Template",
        "body": "Use this template to document the testing strategy for a project. Fill it in during the orchestration flow and keep it in the repo.\n\n# Testing Strategy\n\n## Project Overview\n- **Project**: [name]\n- **Type**: [startup MVP | production app | library | critical infrastructure]\n- **Primary Language**: [language]\n- **Framework**: [framework]\n- **Test Runner**: [runner]\n- **Coverage Tool**: [tool]\n\n## Coverage Baseline\n- **Statement**: [X%]\n- **Branch**: [X%]\n- **Function**: [X%]\n- **E2E Journeys Covered**: [N of M]\n- **Date Measured**: [YYYY-MM-DD]\n\n## Coverage Targets\n- **Statement**: [target%]\n- **Branch**: [target%]\n- **Function**: [target%]\n- **E2E Journeys**: [target count]\n\n## Test Patterns Selected\n- [ ] [Pattern 1 — reason for selection]\n- [ ] [Pattern 2 — reason for selection]\n- [ ] [Pattern 3 — reason for selection]\n\n## Critical User Journeys (E2E)\n1. [Journey 1 — e.g., signup -> onboarding -> first action]\n2. [Journey 2 — e.g., login -> dashboard -> export]\n3. [Journey 3 — e.g., checkout -> payment -> confirmation]\n\n## Gaps and Risks\n- [Untested area 1 — risk level, mitigation plan]\n- [Untested area 2 — risk level, mitigation plan]\n\n## Quality Gate Status\n- [ ] All tests pass\n- [ ] Coverage targets met\n- [ ] Critical journeys covered with E2E\n- [ ] No skipped tests without justification\n- [ ] Test execution time within budget\n- [ ] CI enforces coverage thresholds"
      },
      {
        "title": "Quality Gates for Testing Completion",
        "body": "All of the following must be satisfied before marking testing complete:\n\nGate\tRequirement\tWhy\nAll tests pass\tZero failures, zero errors\tFlaky tests count as failures\nCoverage targets met\tStatement, branch, and function coverage meet project-type thresholds\tUntested code is unverified code\nCritical journeys covered\tEvery critical user journey has a passing E2E test\tRevenue and trust depend on these flows\nNo unjustified skips\tEvery skip, xit, or xdescribe has a comment and linked issue\tSkipped tests rot into permanent gaps\nExecution time budget\tUnit < 60s, E2E < 10min\tSlow suites get skipped by developers\nNo test pollution\tRunning any test file alone produces same results as full suite\tShared state masks failures\nMocks are justified\tEvery mock has a comment explaining why the real impl cannot be used\tOver-mocking hides real bugs"
      },
      {
        "title": "NEVER Do",
        "body": "NEVER write tests that test implementation details instead of behavior — tests must verify what the code does, not how it does it\nNEVER skip the discovery phase — always measure the baseline before writing new tests, or you cannot demonstrate improvement\nNEVER merge tests that depend on execution order — each test must be independent and idempotent\nNEVER mock what you do not own — wrap third-party dependencies in your own adapters and mock the adapters instead\nNEVER treat coverage percentage as the sole quality metric — 100% coverage with weak assertions is worse than 70% coverage with strong assertions\nNEVER leave the test suite in a failing state — if a test fails, fix it or remove it with a justification before moving on\nNEVER skip E2E tests for critical user journeys — unit tests alone cannot catch integration failures in flows that matter most\nNEVER deploy without running the full test suite — partial test runs create false confidence"
      }
    ],
    "body": "Testing Workflow\n\nOrchestrate comprehensive testing across a project by coordinating the testing-patterns skill, e2e-testing skill, and testing agents. This meta-skill does not define test patterns itself — it routes to the right skill or agent at each stage and ensures nothing is missed.\n\nWhen to Use\nSetting up testing for a new project from scratch\nImproving coverage for an existing project with gaps\nEstablishing or revising a testing strategy\nBefore a major release to verify quality gates are met\nAfter a large refactor to confirm nothing broke\nDuring code review when test adequacy is in question\nOnboarding a team to a testing workflow\nOrchestration Flow\n\nFollow these steps in order. Each step routes to a specific skill or agent — read and apply that resource before moving to the next step.\n\nPhase 1: Discovery and Baseline\n\nScan the project to understand existing test infrastructure, measure current coverage, and identify gaps before making changes. Without a baseline, you cannot demonstrate improvement.\n\nIdentify test infrastructure — Determine the test runner, assertion library, coverage tool, and CI configuration already in use. If none exist, flag that setup is needed.\nMeasure current coverage — Run the existing test suite and record statement, branch, and function coverage. This is the baseline.\nMap untested code — Identify modules, functions, and code paths with no test coverage. Prioritize by risk: business-critical logic first, utilities last.\nCatalog existing tests — Categorize existing tests as unit, integration, or E2E. Check for skipped tests, flaky tests, and tests that don't assert anything meaningful.\nPhase 2: Strategy Selection\n\nBased on the discovery results, select the appropriate testing approach for this project.\n\nDetermine project type — Use the Coverage Targets table below to set appropriate thresholds for the project type.\nSelect test patterns — Read ai/skills/testing/testing-patterns/SKILL.md and choose the unit/integration test patterns that match the project's architecture, language, and framework.\nIdentify critical user journeys — List the 3-10 most important user workflows that require E2E coverage. These are flows where a failure would directly impact revenue, user trust, or safety.\nDocument the strategy — Fill in the Testing Strategy Template (below) and commit it to the repository.\nPhase 3: Implementation\n\nGenerate tests following the patterns selected in Phase 2.\n\nUnit tests first — Write unit tests for uncovered business logic, starting with the highest-risk modules. Follow the testing pyramid: ~70% of your tests should be unit tests.\nIntegration tests next — Write integration tests for module boundaries, API endpoints, and database queries. Focus on the seams where components interact.\nE2E tests for critical journeys — Read ai/skills/testing/e2e-testing/SKILL.md and write E2E tests for each critical user journey identified in Phase 2.\nEdge case coverage — After the happy paths are covered, add tests for error conditions, boundary values, null/empty inputs, and concurrency scenarios.\nPhase 4: Validation\n\nVerify that the new tests meet quality standards and coverage targets.\n\nRun the full test suite — Every test must pass. Fix failures before proceeding.\nMeasure coverage against targets — Compare new coverage against the thresholds for the project type. If targets are not met, return to Phase 3.\nCheck test quality — Review tests for the anti-patterns listed in testing-patterns (assert-free tests, overmocking, flaky tests, test pollution). Fix any found.\nVerify CI integration — Confirm that tests run automatically on every push/PR and that coverage thresholds are enforced in CI.\nPhase 5: Maintenance\n\nEstablish ongoing practices to keep the test suite healthy.\n\nSet up coverage ratcheting — Configure CI to fail if coverage drops below the current level. Coverage should only go up.\nEstablish flaky test policy — Any test that fails intermittently must be fixed within one sprint or removed with a justification.\nDefine test review standards — Every PR that adds or changes logic must include corresponding test changes. Reviewers check for this.\nSchedule test health audits — Quarterly, review test execution time, flaky test rate, skipped test count, and coverage trends.\nSkill Routing Table\n\nUse this table to route specific needs to the correct resource:\n\nNeed\tRoute To\tPath\nUnit/integration test patterns\ttesting-patterns\tai/skills/testing/testing-patterns/SKILL.md\nE2E test patterns\te2e-testing\tai/skills/testing/e2e-testing/SKILL.md\nCode quality standards\tclean-code\tai/skills/testing/clean-code/SKILL.md\nReview checklist\tcode-review\tai/skills/testing/code-review/SKILL.md\nCI/CD quality gates\tquality-gates\tai/skills/testing/quality-gates/SKILL.md\nDebugging test failures\tdebugging\tai/skills/testing/debugging/SKILL.md\n\nWhen a request falls clearly into one row, go directly to that resource. Use the full orchestration flow only when comprehensive coverage is the goal.\n\nCoverage Targets\n\nTargets vary by project type. Use the appropriate row to set expectations:\n\nProject Type\tStatement\tBranch\tFunction\tE2E Journeys\tNotes\nStartup MVP\t60%\t50%\t60%\tTop 3 flows\tFocus on critical paths only\nProduction App\t80%\t70%\t80%\tTop 10 flows\tBalance speed with confidence\nLibrary / Package\t90%\t85%\t95%\tN/A\tPublic API must be fully covered\nCritical Infrastructure\t95%\t90%\t95%\tAll flows\tZero tolerance for gaps\n\nThese are minimums. Aim higher when time permits, but do not block releases on vanity metrics — prioritize meaningful coverage over percentage points.\n\nTesting Strategy Template\n\nUse this template to document the testing strategy for a project. Fill it in during the orchestration flow and keep it in the repo.\n\n# Testing Strategy\n\n## Project Overview\n- **Project**: [name]\n- **Type**: [startup MVP | production app | library | critical infrastructure]\n- **Primary Language**: [language]\n- **Framework**: [framework]\n- **Test Runner**: [runner]\n- **Coverage Tool**: [tool]\n\n## Coverage Baseline\n- **Statement**: [X%]\n- **Branch**: [X%]\n- **Function**: [X%]\n- **E2E Journeys Covered**: [N of M]\n- **Date Measured**: [YYYY-MM-DD]\n\n## Coverage Targets\n- **Statement**: [target%]\n- **Branch**: [target%]\n- **Function**: [target%]\n- **E2E Journeys**: [target count]\n\n## Test Patterns Selected\n- [ ] [Pattern 1 — reason for selection]\n- [ ] [Pattern 2 — reason for selection]\n- [ ] [Pattern 3 — reason for selection]\n\n## Critical User Journeys (E2E)\n1. [Journey 1 — e.g., signup -> onboarding -> first action]\n2. [Journey 2 — e.g., login -> dashboard -> export]\n3. [Journey 3 — e.g., checkout -> payment -> confirmation]\n\n## Gaps and Risks\n- [Untested area 1 — risk level, mitigation plan]\n- [Untested area 2 — risk level, mitigation plan]\n\n## Quality Gate Status\n- [ ] All tests pass\n- [ ] Coverage targets met\n- [ ] Critical journeys covered with E2E\n- [ ] No skipped tests without justification\n- [ ] Test execution time within budget\n- [ ] CI enforces coverage thresholds\n\nQuality Gates for Testing Completion\n\nAll of the following must be satisfied before marking testing complete:\n\nGate\tRequirement\tWhy\nAll tests pass\tZero failures, zero errors\tFlaky tests count as failures\nCoverage targets met\tStatement, branch, and function coverage meet project-type thresholds\tUntested code is unverified code\nCritical journeys covered\tEvery critical user journey has a passing E2E test\tRevenue and trust depend on these flows\nNo unjustified skips\tEvery skip, xit, or xdescribe has a comment and linked issue\tSkipped tests rot into permanent gaps\nExecution time budget\tUnit < 60s, E2E < 10min\tSlow suites get skipped by developers\nNo test pollution\tRunning any test file alone produces same results as full suite\tShared state masks failures\nMocks are justified\tEvery mock has a comment explaining why the real impl cannot be used\tOver-mocking hides real bugs\nNEVER Do\nNEVER write tests that test implementation details instead of behavior — tests must verify what the code does, not how it does it\nNEVER skip the discovery phase — always measure the baseline before writing new tests, or you cannot demonstrate improvement\nNEVER merge tests that depend on execution order — each test must be independent and idempotent\nNEVER mock what you do not own — wrap third-party dependencies in your own adapters and mock the adapters instead\nNEVER treat coverage percentage as the sole quality metric — 100% coverage with weak assertions is worse than 70% coverage with strong assertions\nNEVER leave the test suite in a failing state — if a test fails, fix it or remove it with a justification before moving on\nNEVER skip E2E tests for critical user journeys — unit tests alone cannot catch integration failures in flows that matter most\nNEVER deploy without running the full test suite — partial test runs create false confidence"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/wpank/testing-workflow",
    "publisherUrl": "https://clawhub.ai/wpank/testing-workflow",
    "owner": "wpank",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/testing-workflow",
    "downloadUrl": "https://openagent3.xyz/downloads/testing-workflow",
    "agentUrl": "https://openagent3.xyz/skills/testing-workflow/agent",
    "manifestUrl": "https://openagent3.xyz/skills/testing-workflow/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/testing-workflow/agent.md"
  }
}