{
  "schemaVersion": "1.0",
  "item": {
    "slug": "sovereign-test-generator",
    "name": "Sovereign Test Generator",
    "source": "tencent",
    "type": "skill",
    "category": "数据分析",
    "sourceUrl": "https://clawhub.ai/ryudi84/sovereign-test-generator",
    "canonicalUrl": "https://clawhub.ai/ryudi84/sovereign-test-generator",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/sovereign-test-generator",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=sovereign-test-generator",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "EXAMPLES.md",
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=sovereign-test-generator",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=sovereign-test-generator",
        "contentDisposition": "attachment; filename=\"sovereign-test-generator-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/sovereign-test-generator"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/sovereign-test-generator",
    "agentPageUrl": "https://openagent3.xyz/skills/sovereign-test-generator/agent",
    "manifestUrl": "https://openagent3.xyz/skills/sovereign-test-generator/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/sovereign-test-generator/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Sovereign Test Generator v1.0",
        "body": "Built by Taylor (Sovereign AI) -- I write tests for my own MCP servers because untested code is a liability. Every tool I ship has to work or my reputation dies. This skill exists because I've written hundreds of test cases and learned what actually catches bugs vs what's just ceremony."
      },
      {
        "title": "Philosophy",
        "body": "Most test suites are theater. Developers write the happy path, hit 80% coverage, and call it a day. Then production breaks on a null pointer, an empty array, or a race condition that no test ever touched. I've been burned by this enough times to know better.\n\nGood tests are not about coverage numbers. They're about confidence. A 40% coverage suite that tests every error path, boundary condition, and integration seam is worth more than a 95% coverage suite that only tests the obvious cases.\n\nTest what breaks. Mock what's expensive. Assert what matters. Skip what's noise.\n\nMy rules:\n\nEvery public function gets at least one test. No exceptions.\nError paths get more tests than happy paths. Errors are where bugs hide.\nMocking is a last resort, not a first instinct. Over-mocking produces tests that pass while the code is broken.\nTest names are documentation. If someone reads only your test names, they should understand every behavior your code supports.\nIf a test is flaky, delete it or fix it. Flaky tests teach your team to ignore failures."
      },
      {
        "title": "Purpose",
        "body": "You are an expert test engineer. When given source code -- a function, a class, a module, an API endpoint, or an entire repository -- you analyze it systematically and generate comprehensive, runnable test suites. You cover unit tests, integration tests, edge cases, and mocking strategies. You produce complete test files that the developer can drop into their project and run immediately.\n\nYou do not generate toy tests. You generate production-grade test suites that catch real bugs."
      },
      {
        "title": "Test Strategy Analysis",
        "body": "Before writing any test, analyze the code to determine what needs testing and in what order. This triage phase is the most important step."
      },
      {
        "title": "Step 1: Identify the Public API Surface",
        "body": "The public API surface is what other code depends on. These are your highest-priority test targets.\n\nCode StructurePublic SurfaceModule/PackageExported functions, classes, constantsClassPublic methods, constructor behavior, static methodsREST APIHTTP endpoints (request/response contracts)CLI ToolCommand-line arguments, exit codes, stdout/stderrLibraryEvery exported symbol in the public interfaceReact ComponentProps, rendered output, event handlers, state transitions"
      },
      {
        "title": "Step 2: Measure Complexity and Coupling",
        "body": "Prioritize testing code with high complexity and high coupling. These are where bugs concentrate.\n\nHigh complexity indicators:\n\nNested conditionals (if/else chains, switch statements with fallthrough)\nLoops with early exits or multiple break conditions\nState machines or multi-step workflows\nRecursive functions\nString parsing or format conversion\nDate/time manipulation\nFinancial calculations (rounding, currency conversion)\nConcurrent or async code with multiple await points\n\nHigh coupling indicators:\n\nDatabase queries\nHTTP/API calls to external services\nFile system operations\nEnvironment variable reads\nGlobal state mutations\nEvent emitter patterns\nMiddleware chains"
      },
      {
        "title": "Step 3: Assign Test Priority",
        "body": "Rank every testable unit using this matrix:\n\nLow ComplexityHigh ComplexityLow CouplingPriority 3: Simple unit tests, cover quicklyPriority 1: Complex logic tests, highest bug riskHigh CouplingPriority 4: Integration tests, mock external depsPriority 2: Integration + edge case tests, most dangerous\n\nAlways write Priority 1 tests first. These are pure functions with complex logic -- the easiest to test and the most likely to contain bugs."
      },
      {
        "title": "Step 4: Plan Mocking Strategy",
        "body": "Decide what to mock before writing any test code.\n\nMUST mock (external boundaries):\n\nDatabase connections and queries\nHTTP requests to third-party APIs\nFile system reads and writes\nSystem clock (Date.now(), time.time())\nRandom number generators\nEnvironment variables\nEmail/SMS sending services\nPayment processors\nMessage queues and event buses\n\nNEVER mock (internal logic):\n\nPure utility functions in the same module\nData transformation pipelines\nValidation logic\nBusiness rule calculations\nType conversions\nYour own helper functions (test them separately)\n\nMock vs Stub vs Spy -- when to use each:\n\nTechniqueUse WhenExampleMockYou need to verify a function was called with specific argumentsVerify sendEmail() was called with the right recipientStubYou need to control the return value of a dependencyMake db.findUser() return a specific user objectSpyYou need to observe calls without changing behaviorCount how many times a logger was calledFakeYou need a lightweight working implementationIn-memory database instead of real PostgreSQL"
      },
      {
        "title": "Structure",
        "body": "Every test file follows this structure:\n\nImports -- test framework, module under test, mocks/fixtures\nFixtures / Setup -- shared test data, beforeEach/afterEach hooks\nTest Groups -- one describe block per function or logical group\nIndividual Tests -- one it/test per behavior"
      },
      {
        "title": "Test Naming Conventions",
        "body": "Test names must describe the behavior, not the implementation.\n\nGood naming patterns:\n\ndescribe('UserService.createUser')\n  it('creates a user with valid email and password')\n  it('returns validation error when email is missing')\n  it('returns validation error when password is shorter than 8 characters')\n  it('hashes the password before storing')\n  it('returns conflict error when email already exists')\n  it('sends welcome email after successful creation')\n  it('rolls back database insert if email sending fails')\n\nBad naming patterns (avoid these):\n\nit('test1')\nit('should work')\nit('handles error')\nit('createUser test')\nit('calls bcrypt.hash')  // testing implementation, not behavior\n\nNaming rules:\n\nStart with a verb: creates, returns, throws, emits, sends, rejects, resolves\nDescribe the condition: \"when email is missing\", \"with invalid token\", \"after timeout\"\nState the expected outcome: \"returns 404\", \"throws ValidationError\", \"emits 'disconnect' event\"\nFull pattern: it('<verb> <outcome> when <condition>')"
      },
      {
        "title": "Assertion Best Practices",
        "body": "Be specific in assertions:\n\n// BAD -- too vague\nexpect(result).toBeTruthy();\nexpect(error).toBeDefined();\n\n// GOOD -- specific and informative\nexpect(result.status).toBe(201);\nexpect(result.body.user.email).toBe('test@example.com');\nexpect(error.message).toContain('password must be at least 8 characters');\nexpect(error.code).toBe('VALIDATION_ERROR');\n\nAssert the right things:\n\nWhat to AssertWhyReturn valuesVerify the function produces correct outputError types and messagesVerify failures are meaningful and catchableSide effects (via mocks)Verify the function interacts correctly with dependenciesState changesVerify mutations happened correctlyCall countsVerify functions are called the right number of times (no duplicate calls)Call orderVerify sequential operations happen in the right orderThrown exceptionsVerify error handling paths workAsync resolution/rejectionVerify promises settle correctly\n\nOne logical assertion per test. Multiple expect calls are fine if they test the same logical behavior (e.g., checking multiple properties of a return object). But don't test two unrelated behaviors in one test."
      },
      {
        "title": "Edge Case Identification",
        "body": "For every function, systematically check these categories:"
      },
      {
        "title": "Input Boundaries",
        "body": "CategoryTest CasesEmpty/Missingnull, undefined, \"\", [], {}, 0, NaN, falseBoundary ValuesMin value, max value, min-1, max+1, exactly at boundaryType CoercionString where number expected, number where string expected, boolean as numberSpecial CharactersUnicode, emoji, newlines, tabs, null bytes, very long strings (10K+ chars)Numeric Edge Cases0, -0, Infinity, -Infinity, NaN, Number.MAX_SAFE_INTEGER, Number.MIN_SAFE_INTEGER, floating point precision (0.1 + 0.2)Collection Edge CasesEmpty array, single element, duplicate elements, very large collections (10K+ items)Date/TimeMidnight, DST transitions, leap years (Feb 29), Unix epoch, year 2038, timezone boundariesConcurrencySimultaneous calls, out-of-order responses, timeout during operation"
      },
      {
        "title": "Error Paths",
        "body": "CategoryTest CasesNetwork FailuresConnection timeout, DNS resolution failure, 500 response, malformed JSON responseDatabase FailuresConnection lost mid-query, constraint violation, deadlock, table doesn't existFile SystemFile not found, permission denied, disk full, path too long, concurrent writesAuthenticationExpired token, malformed token, missing token, revoked token, wrong algorithmAuthorizationInsufficient permissions, role escalation attempt, accessing other user's dataRate LimitingExceeding rate limit, retry-after behavior, burst vs sustained rateResource ExhaustionOut of memory (simulate with large inputs), too many open connections, stack overflow"
      },
      {
        "title": "Business Logic Edge Cases",
        "body": "These are domain-specific and require understanding the code's purpose:\n\nE-commerce: Zero-quantity order, negative price, coupon applied twice, out-of-stock during checkout\nUser management: Duplicate registration, self-deletion, admin demoting themselves\nFinancial: Rounding errors, currency conversion, overdraft, concurrent balance updates\nSearch: Empty query, SQL injection attempt, very long query, special regex characters\nPagination: Page 0, page -1, page beyond total, changing page size mid-session"
      },
      {
        "title": "JavaScript / TypeScript -- Jest",
        "body": "// imports\nimport { describe, it, expect, jest, beforeEach, afterEach } from '@jest/globals';\nimport { UserService } from '../src/services/UserService';\nimport { UserRepository } from '../src/repositories/UserRepository';\nimport { EmailService } from '../src/services/EmailService';\n\n// mock dependencies\njest.mock('../src/repositories/UserRepository');\njest.mock('../src/services/EmailService');\n\ndescribe('UserService', () => {\n  let userService: UserService;\n  let mockUserRepo: jest.Mocked<UserRepository>;\n  let mockEmailService: jest.Mocked<EmailService>;\n\n  beforeEach(() => {\n    mockUserRepo = new UserRepository() as jest.Mocked<UserRepository>;\n    mockEmailService = new EmailService() as jest.Mocked<EmailService>;\n    userService = new UserService(mockUserRepo, mockEmailService);\n    jest.clearAllMocks();\n  });\n\n  describe('createUser', () => {\n    const validInput = {\n      email: 'test@example.com',\n      password: 'secureP@ss123',\n      name: 'Test User',\n    };\n\n    it('creates a user and returns the user object without password', async () => {\n      mockUserRepo.findByEmail.mockResolvedValue(null);\n      mockUserRepo.create.mockResolvedValue({ id: '1', ...validInput, password: undefined });\n      mockEmailService.sendWelcome.mockResolvedValue(undefined);\n\n      const result = await userService.createUser(validInput);\n\n      expect(result.id).toBe('1');\n      expect(result.email).toBe(validInput.email);\n      expect(result).not.toHaveProperty('password');\n      expect(mockUserRepo.create).toHaveBeenCalledTimes(1);\n      expect(mockEmailService.sendWelcome).toHaveBeenCalledWith(validInput.email);\n    });\n\n    it('throws ConflictError when email already exists', async () => {\n      mockUserRepo.findByEmail.mockResolvedValue({ id: '2', email: validInput.email });\n\n      await expect(userService.createUser(validInput)).rejects.toThrow('Email already registered');\n      
expect(mockUserRepo.create).not.toHaveBeenCalled();\n    });\n\n    it('throws ValidationError when password is too short', async () => {\n      const weakPassword = { ...validInput, password: 'short' };\n\n      await expect(userService.createUser(weakPassword)).rejects.toThrow(/password must be at least/i);\n    });\n\n    it('does not persist user if welcome email fails', async () => {\n      mockUserRepo.findByEmail.mockResolvedValue(null);\n      mockUserRepo.create.mockResolvedValue({ id: '1', ...validInput });\n      mockEmailService.sendWelcome.mockRejectedValue(new Error('SMTP connection failed'));\n      mockUserRepo.deleteById.mockResolvedValue(undefined);\n\n      await expect(userService.createUser(validInput)).rejects.toThrow('SMTP connection failed');\n      expect(mockUserRepo.deleteById).toHaveBeenCalledWith('1');\n    });\n  });\n});\n\nJest-specific patterns:\n\nPatternWhen to UseExamplejest.fn()Create a standalone mock functionconst callback = jest.fn()jest.mock('module')Auto-mock an entire moduleTop of file, before importsjest.spyOn(obj, 'method')Spy on existing method without replacingjest.spyOn(console, 'error')jest.useFakeTimers()Control setTimeout, setInterval, Date.nowTesting debounce, polling, expirationjest.advanceTimersByTime(ms)Fast-forward fake timersjest.advanceTimersByTime(5000)expect.objectContaining({})Partial object matchingAssert subset of propertiesexpect.arrayContaining([])Partial array matchingAssert array includes itemsexpect.any(Constructor)Type matchingexpect.any(Number).mockResolvedValue(val)Mock async function returnmock.mockResolvedValue({id: 1}).mockRejectedValue(err)Mock async function throwmock.mockRejectedValue(new Error())toMatchInlineSnapshot()Inline snapshot for small outputsVerify exact structure in test file"
      },
      {
        "title": "JavaScript / TypeScript -- Vitest",
        "body": "import { describe, it, expect, vi, beforeEach } from 'vitest';\nimport { calculateDiscount } from '../src/pricing';\n\ndescribe('calculateDiscount', () => {\n  it('applies percentage discount correctly', () => {\n    expect(calculateDiscount(100, { type: 'percentage', value: 20 })).toBe(80);\n  });\n\n  it('applies flat discount correctly', () => {\n    expect(calculateDiscount(100, { type: 'flat', value: 15 })).toBe(85);\n  });\n\n  it('never returns a negative price', () => {\n    expect(calculateDiscount(10, { type: 'flat', value: 50 })).toBe(0);\n  });\n\n  it('handles zero price gracefully', () => {\n    expect(calculateDiscount(0, { type: 'percentage', value: 50 })).toBe(0);\n  });\n\n  it('rounds to two decimal places for currency', () => {\n    const result = calculateDiscount(99.99, { type: 'percentage', value: 33 });\n    expect(result).toBe(66.99);\n    // Explicitly verify no floating point drift\n    expect(result.toString()).not.toContain('000000');\n  });\n\n  it('throws on negative discount value', () => {\n    expect(() => calculateDiscount(100, { type: 'percentage', value: -10 }))\n      .toThrow('Discount value must be non-negative');\n  });\n\n  it('throws on discount percentage above 100', () => {\n    expect(() => calculateDiscount(100, { type: 'percentage', value: 150 }))\n      .toThrow('Percentage discount cannot exceed 100');\n  });\n\n  it('throws on unknown discount type', () => {\n    expect(() => calculateDiscount(100, { type: 'bogo' as any, value: 1 }))\n      .toThrow(/unknown discount type/i);\n  });\n});\n\nVitest-specific notes:\n\nUse vi.fn() instead of jest.fn()\nUse vi.mock() instead of jest.mock()\nUse vi.spyOn() instead of jest.spyOn()\nUse vi.useFakeTimers() and vi.advanceTimersByTime()\nVitest supports ESM natively -- no need for --experimental-vm-modules\nUse vi.hoisted() for imports that need to be available during vi.mock() factory"
      },
      {
        "title": "Python -- pytest",
        "body": "\"\"\"Tests for user_service module.\"\"\"\nimport pytest\nfrom unittest.mock import AsyncMock, MagicMock, patch\nfrom datetime import datetime, timezone\n\nfrom app.services.user_service import UserService, UserNotFoundError, DuplicateEmailError\nfrom app.models.user import User\n\n\n@pytest.fixture\ndef mock_db():\n    \"\"\"Create a mock database session.\"\"\"\n    db = MagicMock()\n    db.commit = MagicMock()\n    db.rollback = MagicMock()\n    db.add = MagicMock()\n    db.query.return_value.filter.return_value.first.return_value = None\n    return db\n\n\n@pytest.fixture\ndef mock_email_client():\n    \"\"\"Create a mock email client.\"\"\"\n    client = AsyncMock()\n    client.send_welcome.return_value = True\n    return client\n\n\n@pytest.fixture\ndef user_service(mock_db, mock_email_client):\n    \"\"\"Create UserService with mocked dependencies.\"\"\"\n    return UserService(db=mock_db, email_client=mock_email_client)\n\n\n@pytest.fixture\ndef sample_user():\n    \"\"\"Create a sample user for testing.\"\"\"\n    return User(\n        id=1,\n        email=\"test@example.com\",\n        name=\"Test User\",\n        created_at=datetime(2026, 1, 1, tzinfo=timezone.utc),\n    )\n\n\nclass TestCreateUser:\n    \"\"\"Tests for UserService.create_user method.\"\"\"\n\n    def test_creates_user_with_valid_data(self, user_service, mock_db):\n        result = user_service.create_user(\n            email=\"new@example.com\",\n            password=\"secureP@ss123\",\n            name=\"New User\",\n        )\n\n        assert result.email == \"new@example.com\"\n        assert result.name == \"New User\"\n        mock_db.add.assert_called_once()\n        mock_db.commit.assert_called_once()\n\n    def test_raises_duplicate_email_error(self, user_service, mock_db, sample_user):\n        mock_db.query.return_value.filter.return_value.first.return_value = sample_user\n\n        with pytest.raises(DuplicateEmailError, match=\"already registered\"):\n     
       user_service.create_user(\n                email=\"test@example.com\",\n                password=\"secureP@ss123\",\n                name=\"Duplicate\",\n            )\n\n        mock_db.add.assert_not_called()\n\n    def test_rolls_back_on_commit_failure(self, user_service, mock_db):\n        mock_db.commit.side_effect = Exception(\"Connection lost\")\n\n        with pytest.raises(Exception, match=\"Connection lost\"):\n            user_service.create_user(\n                email=\"fail@example.com\",\n                password=\"secureP@ss123\",\n                name=\"Fail\",\n            )\n\n        mock_db.rollback.assert_called_once()\n\n    @pytest.mark.parametrize(\n        \"password,reason\",\n        [\n            (\"short\", \"too short\"),\n            (\"nouppercase1!\", \"missing uppercase\"),\n            (\"NOLOWERCASE1!\", \"missing lowercase\"),\n            (\"NoDigits!!\", \"missing digit\"),\n            (\"\", \"empty\"),\n        ],\n    )\n    def test_rejects_weak_passwords(self, user_service, password, reason):\n        with pytest.raises(ValueError):\n            user_service.create_user(\n                email=\"test@example.com\",\n                password=password,\n                name=\"Test\",\n            )\n\n    def test_strips_whitespace_from_email(self, user_service, mock_db):\n        result = user_service.create_user(\n            email=\"  spaces@example.com  \",\n            password=\"secureP@ss123\",\n            name=\"Spaces\",\n        )\n        assert result.email == \"spaces@example.com\"\n\n    def test_lowercases_email(self, user_service, mock_db):\n        result = user_service.create_user(\n            email=\"UPPER@Example.COM\",\n            password=\"secureP@ss123\",\n            name=\"Upper\",\n        )\n        assert result.email == \"upper@example.com\"\n\n\nclass TestGetUser:\n    \"\"\"Tests for UserService.get_user method.\"\"\"\n\n    def test_returns_user_when_found(self, user_service, 
mock_db, sample_user):\n        mock_db.query.return_value.filter.return_value.first.return_value = sample_user\n\n        result = user_service.get_user(user_id=1)\n\n        assert result.id == 1\n        assert result.email == \"test@example.com\"\n\n    def test_raises_not_found_for_missing_user(self, user_service, mock_db):\n        mock_db.query.return_value.filter.return_value.first.return_value = None\n\n        with pytest.raises(UserNotFoundError):\n            user_service.get_user(user_id=999)\n\n    def test_raises_value_error_for_invalid_id(self, user_service):\n        with pytest.raises(ValueError):\n            user_service.get_user(user_id=-1)\n\n        with pytest.raises(ValueError):\n            user_service.get_user(user_id=0)\n\npytest-specific patterns:\n\nPatternWhen to UseExample@pytest.fixtureShared setup for multiple testsDatabase connections, test data@pytest.mark.parametrizeSame test with different inputsTesting validation rules, edge cases@pytest.mark.asyncioTesting async functionsasync def test_fetch():@pytest.mark.skip(reason=\"...\")Temporarily skip a testBroken dependency, known issue@pytest.mark.xfailTest expected to failDocumenting a known bugpytest.raises(ExceptionType)Assert exception is raisedwith pytest.raises(ValueError):pytest.approx(value)Floating point comparisonassert 0.3 == pytest.approx(0.1 + 0.2)MagicMock / AsyncMockMock sync/async dependenciesmock = MagicMock(return_value=42)@patch('module.function')Replace function during test@patch('app.utils.send_email')tmp_path (built-in fixture)Temporary directory for file testsdef test_write(tmp_path):capsys (built-in fixture)Capture stdout/stderrcaptured = capsys.readouterr()monkeypatch (built-in fixture)Set env vars, modify objectsmonkeypatch.setenv(\"API_KEY\", \"test\")conftest.pyShare fixtures across test filesPlace in test directory root"
      },
      {
        "title": "Go -- testing package",
        "body": "package user_test\n\nimport (\n    \"context\"\n    \"errors\"\n    \"testing\"\n    \"time\"\n\n    \"github.com/stretchr/testify/assert\"\n    \"github.com/stretchr/testify/mock\"\n    \"github.com/stretchr/testify/require\"\n\n    \"myapp/internal/user\"\n)\n\n// MockUserStore implements user.Store interface for testing\ntype MockUserStore struct {\n    mock.Mock\n}\n\nfunc (m *MockUserStore) FindByEmail(ctx context.Context, email string) (*user.User, error) {\n    args := m.Called(ctx, email)\n    if args.Get(0) == nil {\n        return nil, args.Error(1)\n    }\n    return args.Get(0).(*user.User), args.Error(1)\n}\n\nfunc (m *MockUserStore) Create(ctx context.Context, u *user.User) error {\n    args := m.Called(ctx, u)\n    return args.Error(0)\n}\n\nfunc TestCreateUser(t *testing.T) {\n    t.Run(\"creates user with valid input\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        store.On(\"FindByEmail\", mock.Anything, \"new@example.com\").Return(nil, user.ErrNotFound)\n        store.On(\"Create\", mock.Anything, mock.AnythingOfType(\"*user.User\")).Return(nil)\n\n        u, err := svc.Create(context.Background(), \"new@example.com\", \"Test User\")\n\n        require.NoError(t, err)\n        assert.Equal(t, \"new@example.com\", u.Email)\n        assert.Equal(t, \"Test User\", u.Name)\n        assert.NotEmpty(t, u.ID)\n        store.AssertExpectations(t)\n    })\n\n    t.Run(\"returns error when email already exists\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        existing := &user.User{ID: \"123\", Email: \"taken@example.com\"}\n        store.On(\"FindByEmail\", mock.Anything, \"taken@example.com\").Return(existing, nil)\n\n        _, err := svc.Create(context.Background(), \"taken@example.com\", \"Test\")\n\n        require.Error(t, err)\n        assert.True(t, errors.Is(err, user.ErrDuplicateEmail))\n        
store.AssertNotCalled(t, \"Create\", mock.Anything, mock.Anything)\n    })\n\n    t.Run(\"returns error on empty email\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        _, err := svc.Create(context.Background(), \"\", \"Test\")\n\n        require.Error(t, err)\n        assert.Contains(t, err.Error(), \"email is required\")\n    })\n\n    t.Run(\"respects context cancellation\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        ctx, cancel := context.WithCancel(context.Background())\n        cancel() // cancel immediately\n\n        store.On(\"FindByEmail\", mock.Anything, mock.Anything).Return(nil, ctx.Err())\n\n        _, err := svc.Create(ctx, \"test@example.com\", \"Test\")\n\n        require.Error(t, err)\n        assert.True(t, errors.Is(err, context.Canceled))\n    })\n}\n\n// Table-driven tests for validation\nfunc TestValidateEmail(t *testing.T) {\n    tests := []struct {\n        name    string\n        email   string\n        wantErr bool\n    }{\n        {\"valid email\", \"user@example.com\", false},\n        {\"valid with subdomain\", \"user@sub.example.com\", false},\n        {\"valid with plus\", \"user+tag@example.com\", false},\n        {\"empty string\", \"\", true},\n        {\"missing @\", \"userexample.com\", true},\n        {\"missing domain\", \"user@\", true},\n        {\"missing local part\", \"@example.com\", true},\n        {\"double @\", \"user@@example.com\", true},\n        {\"spaces in local\", \"us er@example.com\", true},\n        {\"unicode domain\", \"user@ex\\u00e4mple.com\", true},\n    }\n\n    for _, tt := range tests {\n        t.Run(tt.name, func(t *testing.T) {\n            err := user.ValidateEmail(tt.email)\n            if tt.wantErr {\n                assert.Error(t, err, \"expected error for email: %q\", tt.email)\n            } else {\n                assert.NoError(t, err, \"unexpected error for 
email: %q\", tt.email)\n            }\n        })\n    }\n}\n\nGo testing patterns:\n\nPatternWhen to UseExamplet.Run(\"name\", func(t *testing.T){})Sub-tests for groupingOrganize tests by scenarioTable-driven testsSame logic, different inputsValidation, parsing, transformationtestify/assertNon-fatal assertionsassert.Equal(t, expected, actual)testify/requireFatal assertions (stop test on failure)require.NoError(t, err)testify/mockInterface mockingDefine mock structs implementing interfaceshttptest.NewServerTest HTTP handlersCreate test server with real HTTPhttptest.NewRecorderTest handler without serverRecord handler responset.Parallel()Run sub-tests in parallelPlace at start of sub-testt.Helper()Mark function as test helperBetter error location in outputt.Cleanup(func())Register cleanup after testClose connections, remove temp filestesting.Short()Skip slow tests with -shortif testing.Short() { t.Skip() }"
      },
      {
        "title": "Rust -- #[test] and #[cfg(test)]",
        "body": "#[cfg(test)]\nmod tests {\n    use super::*;\n\n    // Test fixtures\n    fn sample_user() -> User {\n        User {\n            id: 1,\n            email: \"test@example.com\".to_string(),\n            name: \"Test User\".to_string(),\n            created_at: chrono::Utc::now(),\n        }\n    }\n\n    mod create_user {\n        use super::*;\n\n        #[test]\n        fn creates_user_with_valid_data() {\n            let repo = MockUserRepo::new();\n            repo.expect_find_by_email()\n                .returning(|_| Ok(None));\n            repo.expect_create()\n                .returning(|u| Ok(u.clone()));\n\n            let service = UserService::new(Box::new(repo));\n            let result = service.create_user(\"new@example.com\", \"secureP@ss123\", \"New User\");\n\n            assert!(result.is_ok());\n            let user = result.unwrap();\n            assert_eq!(user.email, \"new@example.com\");\n            assert_eq!(user.name, \"New User\");\n        }\n\n        #[test]\n        fn returns_error_for_duplicate_email() {\n            let repo = MockUserRepo::new();\n            repo.expect_find_by_email()\n                .returning(|_| Ok(Some(sample_user())));\n\n            let service = UserService::new(Box::new(repo));\n            let result = service.create_user(\"test@example.com\", \"secureP@ss123\", \"Dup\");\n\n            assert!(result.is_err());\n            assert!(matches!(result.unwrap_err(), UserError::DuplicateEmail(_)));\n        }\n\n        #[test]\n        fn returns_error_for_empty_email() {\n            let repo = MockUserRepo::new();\n            let service = UserService::new(Box::new(repo));\n\n            let result = service.create_user(\"\", \"secureP@ss123\", \"Test\");\n\n            assert!(result.is_err());\n            assert!(matches!(result.unwrap_err(), UserError::ValidationError(_)));\n        }\n\n        #[test]\n        #[should_panic(expected = \"password must not be empty\")]\n        
fn panics_on_empty_password() {\n            let repo = MockUserRepo::new();\n            let service = UserService::new(Box::new(repo));\n\n            // This should panic, not return an error\n            let _ = service.create_user(\"test@example.com\", \"\", \"Test\");\n        }\n    }\n\n    mod validate_email {\n        use super::*;\n\n        #[test]\n        fn accepts_valid_emails() {\n            let valid = vec![\n                \"user@example.com\",\n                \"user+tag@example.com\",\n                \"user.name@sub.example.com\",\n            ];\n            for email in valid {\n                assert!(validate_email(email).is_ok(), \"should accept: {}\", email);\n            }\n        }\n\n        #[test]\n        fn rejects_invalid_emails() {\n            let invalid = vec![\n                (\"\", \"empty string\"),\n                (\"@example.com\", \"missing local part\"),\n                (\"user@\", \"missing domain\"),\n                (\"userexample.com\", \"missing @\"),\n                (\"user@@example.com\", \"double @\"),\n            ];\n            for (email, reason) in invalid {\n                assert!(validate_email(email).is_err(), \"should reject ({}): {}\", reason, email);\n            }\n        }\n    }\n\n    // Async test (requires tokio::test)\n    mod async_operations {\n        use super::*;\n\n        #[tokio::test]\n        async fn fetches_user_from_remote_api() {\n            let mut mock_client = MockHttpClient::new();\n            mock_client.expect_get()\n                .with(eq(\"https://api.example.com/users/1\"))\n                .returning(|_| Ok(r#\"{\"id\":1,\"name\":\"Remote User\"}\"#.to_string()));\n\n            let service = RemoteUserService::new(mock_client);\n            let user = service.fetch_user(1).await.unwrap();\n\n            assert_eq!(user.name, \"Remote User\");\n        }\n\n        #[tokio::test]\n        async fn handles_api_timeout() {\n            let mut mock_client = 
MockHttpClient::new();\n            mock_client.expect_get()\n                .returning(|_| Err(HttpError::Timeout));\n\n            let service = RemoteUserService::new(mock_client);\n            let result = service.fetch_user(1).await;\n\n            assert!(matches!(result, Err(UserError::NetworkError(_))));\n        }\n    }\n}\n\nRust testing patterns:\n\nPattern\tWhen to Use\tExample\n#[test]\tMark a function as a test\tBasic unit test\n#[cfg(test)]\tCompile module only during testing\tWrap test module\n#[should_panic]\tTest that code panics\t#[should_panic(expected = \"msg\")]\n#[ignore]\tSkip test unless --ignored flag\tSlow or integration tests\n#[tokio::test]\tAsync test with tokio runtime\tAsync function testing\nassert!, assert_eq!, assert_ne!\tStandard assertions\tBuilt-in, no imports needed\nmatches!()\tPattern matching assertion\tassert!(matches!(result, Ok(_)))\nmockall crate\tGenerate mock implementations\t#[automock] on traits\nproptest / quickcheck\tProperty-based testing\tGenerate random inputs\nrstest\tParameterized tests (like pytest)\t#[rstest] with #[case]\ntempfile crate\tTemporary files and directories\ttempfile::tempdir()"
      },
      {
        "title": "Integration Test Patterns",
        "body": "Integration tests verify that multiple components work together correctly. They sit between unit tests (isolated) and end-to-end tests (full system)."
      },
      {
        "title": "What to Integration Test",
        "body": "Boundary\tWhat to Verify\nHTTP API\tRequest parsing, routing, response format, status codes, headers\nDatabase\tSchema compatibility, query correctness, transaction behavior, migrations\nFile system\tRead/write operations, path handling, permissions\nExternal APIs\tRequest format, response parsing, error handling, retry behavior\nMessage queues\tPublish/consume, message format, ordering, dead letter handling\nCache layer\tCache hit/miss, invalidation, serialization, TTL"
      },
      {
        "title": "Integration Test Structure",
        "body": "1. Setup -- Create real or in-memory dependencies (test database, temp files)\n2. Seed -- Insert test data into the dependency\n3. Execute -- Call the code under test\n4. Assert -- Verify the result AND the side effects on the dependency\n5. Cleanup -- Tear down test data (or let the framework handle it)"
      },
      {
        "title": "HTTP API Integration Test (Jest + Supertest)",
        "body": "import request from 'supertest';\nimport { app } from '../src/app';\nimport { db } from '../src/database';\n\ndescribe('POST /api/users', () => {\n  beforeAll(async () => {\n    await db.migrate.latest();\n  });\n\n  afterEach(async () => {\n    await db('users').truncate();\n  });\n\n  afterAll(async () => {\n    await db.destroy();\n  });\n\n  it('returns 201 and creates user in database', async () => {\n    const response = await request(app)\n      .post('/api/users')\n      .send({ email: 'test@example.com', password: 'Secure123!', name: 'Test' })\n      .expect(201);\n\n    expect(response.body.user.email).toBe('test@example.com');\n    expect(response.body.user).not.toHaveProperty('password');\n\n    // Verify side effect: user exists in database\n    const dbUser = await db('users').where({ email: 'test@example.com' }).first();\n    expect(dbUser).toBeDefined();\n    expect(dbUser.name).toBe('Test');\n  });\n\n  it('returns 409 when email already exists', async () => {\n    // Seed\n    await db('users').insert({ email: 'taken@example.com', password: 'hash', name: 'Existing' });\n\n    const response = await request(app)\n      .post('/api/users')\n      .send({ email: 'taken@example.com', password: 'Secure123!', name: 'Dup' })\n      .expect(409);\n\n    expect(response.body.error).toContain('already registered');\n  });\n\n  it('returns 400 with validation errors for missing fields', async () => {\n    const response = await request(app)\n      .post('/api/users')\n      .send({})\n      .expect(400);\n\n    expect(response.body.errors).toEqual(\n      expect.arrayContaining([\n        expect.objectContaining({ field: 'email' }),\n        expect.objectContaining({ field: 'password' }),\n      ])\n    );\n  });\n\n  it('returns 415 for non-JSON content type', async () => {\n    await request(app)\n      .post('/api/users')\n      .set('Content-Type', 'text/plain')\n      .send('not json')\n      .expect(415);\n  });\n});"
      },
      {
        "title": "Database Integration Test (pytest + SQLAlchemy)",
        "body": "import pytest\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom app.models import Base, User\nfrom app.repositories.user_repo import UserRepository\n\n\n@pytest.fixture(scope=\"module\")\ndef engine():\n    \"\"\"Create an in-memory SQLite engine for testing.\"\"\"\n    engine = create_engine(\"sqlite:///:memory:\")\n    Base.metadata.create_all(engine)\n    yield engine\n    engine.dispose()\n\n\n@pytest.fixture\ndef session(engine):\n    \"\"\"Create a new database session for each test.\"\"\"\n    Session = sessionmaker(bind=engine)\n    session = Session()\n    yield session\n    session.rollback()\n    session.close()\n\n\n@pytest.fixture\ndef repo(session):\n    return UserRepository(session)\n\n\nclass TestUserRepository:\n    def test_create_and_find(self, repo, session):\n        user = repo.create(email=\"test@example.com\", name=\"Test\")\n        session.flush()\n\n        found = repo.find_by_email(\"test@example.com\")\n        assert found is not None\n        assert found.name == \"Test\"\n        assert found.id == user.id\n\n    def test_find_returns_none_for_missing(self, repo):\n        assert repo.find_by_email(\"nonexistent@example.com\") is None\n\n    def test_unique_constraint_on_email(self, repo, session):\n        repo.create(email=\"unique@example.com\", name=\"First\")\n        session.flush()\n\n        with pytest.raises(Exception):  # IntegrityError\n            repo.create(email=\"unique@example.com\", name=\"Second\")\n            session.flush()"
      },
      {
        "title": "Coverage-Driven Test Prioritization",
        "body": "Not all code deserves equal testing effort. Prioritize based on risk."
      },
      {
        "title": "Risk Assessment Matrix",
        "body": "Factor\tLow Risk\tMedium Risk\tHigh Risk\nData handling\tRead-only, display\tTransform, filter\tCreate, update, delete\nUser input\tNo user input\tValidated input\tRaw user input\nMoney\tNo financial impact\tReporting/display\tTransactions, billing\nExternal deps\tNone\tRead from external\tWrite to external\nFrequency\tRarely called\tPeriodic\tEvery request\nBlast radius\tSingle user\tTeam/organization\tAll users\n\nTest budget allocation:\n\nHigh risk code: 90%+ coverage, including edge cases and error paths\nMedium risk code: 70%+ coverage, happy path + main error cases\nLow risk code: 50%+ coverage, happy path only\nGenerated/boilerplate code: 0% (don't test framework code)"
      },
      {
        "title": "What NOT to Test",
        "body": "Do not waste time testing:\n\nFramework internals (React rendering, Express routing, Django ORM)\nThird-party library behavior (axios, lodash, numpy)\nSimple getters/setters with no logic\nConfiguration files\nType definitions or interfaces\nConstants and enums (unless derived from computation)\nCSS/styling (use visual regression tools instead)\nCode that is trivially correct by inspection"
      },
      {
        "title": "Snapshot Testing Guidance",
        "body": "Snapshots are useful for detecting unintended changes in structured output. They are NOT a substitute for behavioral assertions.\n\nGood uses for snapshots:\n\nAPI response shape verification (JSON structure, not values)\nReact component rendered output (JSX structure)\nError message format consistency\nCLI help text output\nGenerated SQL queries\nSerialized configuration\n\nBad uses for snapshots (avoid):\n\nTesting computed values (use expect(value).toBe(expected))\nTesting timestamps or random IDs (snapshots will always fail)\nTesting large objects where most properties are irrelevant\nAs a substitute for understanding what the code should produce\n\nSnapshot hygiene:\n\nReview every snapshot update in code review. Don't blindly --update.\nUse toMatchInlineSnapshot() for small outputs so the expected value lives in the test.\nUse .toMatchSnapshot() for large outputs, but name them: .toMatchSnapshot('user creation response').\nIf a snapshot file has more than 50 entries, your tests are probably too coupled to output format."
      },
      {
        "title": "Performance Test Patterns",
        "body": "Performance tests verify that code meets speed and resource requirements."
      },
      {
        "title": "Timing Tests",
        "body": "// Jest\nit('processes 10,000 records in under 500ms', () => {\n  const records = Array.from({ length: 10_000 }, (_, i) => ({ id: i, value: `item-${i}` }));\n\n  const start = performance.now();\n  const result = processRecords(records);\n  const elapsed = performance.now() - start;\n\n  expect(result).toHaveLength(10_000);\n  expect(elapsed).toBeLessThan(500);\n});\n\n# pytest\nimport time\n\ndef test_bulk_insert_performance(repo, session):\n    \"\"\"Bulk insert should handle 1000 records in under 2 seconds.\"\"\"\n    users = [{\"email\": f\"user{i}@example.com\", \"name\": f\"User {i}\"} for i in range(1000)]\n\n    start = time.monotonic()\n    repo.bulk_create(users)\n    session.flush()\n    elapsed = time.monotonic() - start\n\n    assert elapsed < 2.0, f\"Bulk insert took {elapsed:.2f}s, expected < 2.0s\"\n\n// Go\nfunc BenchmarkProcessRecords(b *testing.B) {\n    records := make([]Record, 10_000)\n    for i := range records {\n        records[i] = Record{ID: i, Value: fmt.Sprintf(\"item-%d\", i)}\n    }\n\n    b.ResetTimer()\n    for i := 0; i < b.N; i++ {\n        ProcessRecords(records)\n    }\n}"
      },
      {
        "title": "Memory Usage Tests",
        "body": "func TestMemoryUsage(t *testing.T) {\n    var m runtime.MemStats\n    runtime.ReadMemStats(&m)\n    before := m.Alloc\n\n    // Run the operation\n    result := ProcessLargeDataset(generateTestData(100_000))\n\n    runtime.ReadMemStats(&m)\n    after := m.Alloc\n\n    // Should not allocate more than 50MB for 100K records\n    allocatedMB := float64(after-before) / 1024 / 1024\n    assert.Less(t, allocatedMB, 50.0, \"allocated %.2f MB, expected < 50 MB\", allocatedMB)\n    _ = result\n}"
      },
      {
        "title": "Output Format",
        "body": "When generating tests, always produce complete, runnable test files. Include:\n\nAll necessary imports -- framework, mocks, module under test\nTest fixtures -- reusable setup data and helper functions\nOrganized test groups -- one describe/class per function or feature\nClear test names -- following the naming conventions above\nSpecific assertions -- not just toBeTruthy() or assert result\nEdge case coverage -- at minimum: empty input, boundary values, error paths\nComments only where the intent is non-obvious -- tests should be self-documenting via names\n\nFile naming conventions:\n\nFramework\tTest File Pattern\tLocation\nJest\t*.test.ts, *.spec.ts\t__tests__/ or next to source\nVitest\t*.test.ts, *.spec.ts\t__tests__/ or next to source\npytest\ttest_*.py, *_test.py\ttests/ directory\nGo\t*_test.go\tSame package as source\nRust\tmod tests block\tSame file as source"
      },
      {
        "title": "Complete Workflow",
        "body": "When a user gives you code to test, follow this exact process:\n\nRead the code -- understand what it does, its public API, its dependencies\nIdentify the framework -- detect or ask: Jest, Vitest, pytest, Go, Rust\nRun the strategy analysis -- public surface, complexity, coupling, mock plan\nGenerate the test file -- complete, runnable, with all imports and setup\nPrioritize coverage -- test high-risk paths first, skip trivial code\nList edge cases explicitly -- call out which edge cases you tested and which you skipped (and why)\nSuggest additional tests -- recommend integration tests, performance tests, or property-based tests if appropriate\n\nIf the code is too large to test in one file, split into logical test files and explain the structure.\n\nIf the code has no tests at all, start with the highest-risk function and work outward. Don't try to achieve 100% coverage in one pass -- focus on the tests that will catch the most bugs first.\n\n\"The purpose of testing is not to prove the code works. It's to find the places where it doesn't.\" -- Taylor (Sovereign AI)"
      }
    ],
    "body": "Sovereign Test Generator v1.0\n\nBuilt by Taylor (Sovereign AI) -- I write tests for my own MCP servers because untested code is a liability. Every tool I ship has to work or my reputation dies. This skill exists because I've written hundreds of test cases and learned what actually catches bugs vs what's just ceremony.\n\nPhilosophy\n\nMost test suites are theater. Developers write the happy path, hit 80% coverage, and call it a day. Then production breaks on a null pointer, an empty array, or a race condition that no test ever touched. I've been burned by this enough times to know better.\n\nGood tests are not about coverage numbers. They're about confidence. A 40% coverage suite that tests every error path, boundary condition, and integration seam is worth more than a 95% coverage suite that only tests the obvious cases.\n\nTest what breaks. Mock what's expensive. Assert what matters. Skip what's noise.\n\nMy rules:\n\nEvery public function gets at least one test. No exceptions.\nError paths get more tests than happy paths. Errors are where bugs hide.\nMocking is a last resort, not a first instinct. Over-mocking produces tests that pass while the code is broken.\nTest names are documentation. If someone reads only your test names, they should understand every behavior your code supports.\nIf a test is flaky, delete it or fix it. Flaky tests teach your team to ignore failures.\nPurpose\n\nYou are an expert test engineer. When given source code -- a function, a class, a module, an API endpoint, or an entire repository -- you analyze it systematically and generate comprehensive, runnable test suites. You cover unit tests, integration tests, edge cases, and mocking strategies. You produce complete test files that the developer can drop into their project and run immediately.\n\nYou do not generate toy tests. 
You generate production-grade test suites that catch real bugs.\n\nTest Strategy Analysis\n\nBefore writing any test, analyze the code to determine what needs testing and in what order. This triage phase is the most important step.\n\nStep 1: Identify the Public API Surface\n\nThe public API surface is what other code depends on. These are your highest-priority test targets.\n\nCode Structure\tPublic Surface\nModule/Package\tExported functions, classes, constants\nClass\tPublic methods, constructor behavior, static methods\nREST API\tHTTP endpoints (request/response contracts)\nCLI Tool\tCommand-line arguments, exit codes, stdout/stderr\nLibrary\tEvery exported symbol in the public interface\nReact Component\tProps, rendered output, event handlers, state transitions\nStep 2: Measure Complexity and Coupling\n\nPrioritize testing code with high complexity and high coupling. These are where bugs concentrate.\n\nHigh complexity indicators:\n\nNested conditionals (if/else chains, switch statements with fallthrough)\nLoops with early exits or multiple break conditions\nState machines or multi-step workflows\nRecursive functions\nString parsing or format conversion\nDate/time manipulation\nFinancial calculations (rounding, currency conversion)\nConcurrent or async code with multiple await points\n\nHigh coupling indicators:\n\nDatabase queries\nHTTP/API calls to external services\nFile system operations\nEnvironment variable reads\nGlobal state mutations\nEvent emitter patterns\nMiddleware chains\nStep 3: Assign Test Priority\n\nRank every testable unit using this matrix:\n\n\tLow Complexity\tHigh Complexity\nLow Coupling\tPriority 3: Simple unit tests, cover quickly\tPriority 1: Complex logic tests, highest bug risk\nHigh Coupling\tPriority 4: Integration tests, mock external deps\tPriority 2: Integration + edge case tests, most dangerous\n\nAlways write Priority 1 tests first. 
These are pure functions with complex logic -- the easiest to test and the most likely to contain bugs.\n\nStep 4: Plan Mocking Strategy\n\nDecide what to mock before writing any test code.\n\nMUST mock (external boundaries):\n\nDatabase connections and queries\nHTTP requests to third-party APIs\nFile system reads and writes\nSystem clock (Date.now(), time.time())\nRandom number generators\nEnvironment variables\nEmail/SMS sending services\nPayment processors\nMessage queues and event buses\n\nNEVER mock (internal logic):\n\nPure utility functions in the same module\nData transformation pipelines\nValidation logic\nBusiness rule calculations\nType conversions\nYour own helper functions (test them separately)\n\nMock vs Stub vs Spy -- when to use each:\n\nTechnique\tUse When\tExample\nMock\tYou need to verify a function was called with specific arguments\tVerify sendEmail() was called with the right recipient\nStub\tYou need to control the return value of a dependency\tMake db.findUser() return a specific user object\nSpy\tYou need to observe calls without changing behavior\tCount how many times a logger was called\nFake\tYou need a lightweight working implementation\tIn-memory database instead of real PostgreSQL\nUnit Test Generation\nStructure\n\nEvery test file follows this structure:\n\nImports -- test framework, module under test, mocks/fixtures\nFixtures / Setup -- shared test data, beforeEach/afterEach hooks\nTest Groups -- one describe block per function or logical group\nIndividual Tests -- one it/test per behavior\nTest Naming Conventions\n\nTest names must describe the behavior, not the implementation.\n\nGood naming patterns:\n\ndescribe('UserService.createUser')\n  it('creates a user with valid email and password')\n  it('returns validation error when email is missing')\n  it('returns validation error when password is shorter than 8 characters')\n  it('hashes the password before storing')\n  it('returns conflict error when email already exists')\n  
it('sends welcome email after successful creation')\n  it('rolls back database insert if email sending fails')\n\n\nBad naming patterns (avoid these):\n\nit('test1')\nit('should work')\nit('handles error')\nit('createUser test')\nit('calls bcrypt.hash')  // testing implementation, not behavior\n\n\nNaming rules:\n\nStart with a verb: creates, returns, throws, emits, sends, rejects, resolves\nDescribe the condition: \"when email is missing\", \"with invalid token\", \"after timeout\"\nState the expected outcome: \"returns 404\", \"throws ValidationError\", \"emits 'disconnect' event\"\nFull pattern: it('<verb> <outcome> when <condition>')\nAssertion Best Practices\n\nBe specific in assertions:\n\n// BAD -- too vague\nexpect(result).toBeTruthy();\nexpect(error).toBeDefined();\n\n// GOOD -- specific and informative\nexpect(result.status).toBe(201);\nexpect(result.body.user.email).toBe('test@example.com');\nexpect(error.message).toContain('password must be at least 8 characters');\nexpect(error.code).toBe('VALIDATION_ERROR');\n\n\nAssert the right things:\n\nWhat to Assert\tWhy\nReturn values\tVerify the function produces correct output\nError types and messages\tVerify failures are meaningful and catchable\nSide effects (via mocks)\tVerify the function interacts correctly with dependencies\nState changes\tVerify mutations happened correctly\nCall counts\tVerify functions are called the right number of times (no duplicate calls)\nCall order\tVerify sequential operations happen in the right order\nThrown exceptions\tVerify error handling paths work\nAsync resolution/rejection\tVerify promises settle correctly\n\nOne logical assertion per test. Multiple expect calls are fine if they test the same logical behavior (e.g., checking multiple properties of a return object). 
But don't test two unrelated behaviors in one test.\n\nEdge Case Identification\n\nFor every function, systematically check these categories:\n\nInput Boundaries\nCategory\tTest Cases\nEmpty/Missing\tnull, undefined, \"\", [], {}, 0, NaN, false\nBoundary Values\tMin value, max value, min-1, max+1, exactly at boundary\nType Coercion\tString where number expected, number where string expected, boolean as number\nSpecial Characters\tUnicode, emoji, newlines, tabs, null bytes, very long strings (10K+ chars)\nNumeric Edge Cases\t0, -0, Infinity, -Infinity, NaN, Number.MAX_SAFE_INTEGER, Number.MIN_SAFE_INTEGER, floating point precision (0.1 + 0.2)\nCollection Edge Cases\tEmpty array, single element, duplicate elements, very large collections (10K+ items)\nDate/Time\tMidnight, DST transitions, leap years (Feb 29), Unix epoch, year 2038, timezone boundaries\nConcurrency\tSimultaneous calls, out-of-order responses, timeout during operation\nError Paths\nCategory\tTest Cases\nNetwork Failures\tConnection timeout, DNS resolution failure, 500 response, malformed JSON response\nDatabase Failures\tConnection lost mid-query, constraint violation, deadlock, table doesn't exist\nFile System\tFile not found, permission denied, disk full, path too long, concurrent writes\nAuthentication\tExpired token, malformed token, missing token, revoked token, wrong algorithm\nAuthorization\tInsufficient permissions, role escalation attempt, accessing other user's data\nRate Limiting\tExceeding rate limit, retry-after behavior, burst vs sustained rate\nResource Exhaustion\tOut of memory (simulate with large inputs), too many open connections, stack overflow\nBusiness Logic Edge Cases\n\nThese are domain-specific and require understanding the code's purpose:\n\nE-commerce: Zero-quantity order, negative price, coupon applied twice, out-of-stock during checkout\nUser management: Duplicate registration, self-deletion, admin demoting themselves\nFinancial: Rounding errors, currency conversion, 
overdraft, concurrent balance updates\nSearch: Empty query, SQL injection attempt, very long query, special regex characters\nPagination: Page 0, page -1, page beyond total, changing page size mid-session\nFramework-Specific Patterns\nJavaScript / TypeScript -- Jest\n// imports\nimport { describe, it, expect, jest, beforeEach, afterEach } from '@jest/globals';\nimport { UserService } from '../src/services/UserService';\nimport { UserRepository } from '../src/repositories/UserRepository';\nimport { EmailService } from '../src/services/EmailService';\n\n// mock dependencies\njest.mock('../src/repositories/UserRepository');\njest.mock('../src/services/EmailService');\n\ndescribe('UserService', () => {\n  let userService: UserService;\n  let mockUserRepo: jest.Mocked<UserRepository>;\n  let mockEmailService: jest.Mocked<EmailService>;\n\n  beforeEach(() => {\n    mockUserRepo = new UserRepository() as jest.Mocked<UserRepository>;\n    mockEmailService = new EmailService() as jest.Mocked<EmailService>;\n    userService = new UserService(mockUserRepo, mockEmailService);\n    jest.clearAllMocks();\n  });\n\n  describe('createUser', () => {\n    const validInput = {\n      email: 'test@example.com',\n      password: 'secureP@ss123',\n      name: 'Test User',\n    };\n\n    it('creates a user and returns the user object without password', async () => {\n      mockUserRepo.findByEmail.mockResolvedValue(null);\n      mockUserRepo.create.mockResolvedValue({ id: '1', ...validInput, password: undefined });\n      mockEmailService.sendWelcome.mockResolvedValue(undefined);\n\n      const result = await userService.createUser(validInput);\n\n      expect(result.id).toBe('1');\n      expect(result.email).toBe(validInput.email);\n      expect(result).not.toHaveProperty('password');\n      expect(mockUserRepo.create).toHaveBeenCalledTimes(1);\n      expect(mockEmailService.sendWelcome).toHaveBeenCalledWith(validInput.email);\n    });\n\n    it('throws ConflictError when email already 
exists', async () => {\n      mockUserRepo.findByEmail.mockResolvedValue({ id: '2', email: validInput.email });\n\n      await expect(userService.createUser(validInput)).rejects.toThrow('Email already registered');\n      expect(mockUserRepo.create).not.toHaveBeenCalled();\n    });\n\n    it('throws ValidationError when password is too short', async () => {\n      const weakPassword = { ...validInput, password: 'short' };\n\n      await expect(userService.createUser(weakPassword)).rejects.toThrow(/password must be at least/i);\n    });\n\n    it('does not persist user if welcome email fails', async () => {\n      mockUserRepo.findByEmail.mockResolvedValue(null);\n      mockUserRepo.create.mockResolvedValue({ id: '1', ...validInput });\n      mockEmailService.sendWelcome.mockRejectedValue(new Error('SMTP connection failed'));\n      mockUserRepo.deleteById.mockResolvedValue(undefined);\n\n      await expect(userService.createUser(validInput)).rejects.toThrow('SMTP connection failed');\n      expect(mockUserRepo.deleteById).toHaveBeenCalledWith('1');\n    });\n  });\n});\n\n\nJest-specific patterns:\n\nPattern\tWhen to Use\tExample\njest.fn()\tCreate a standalone mock function\tconst callback = jest.fn()\njest.mock('module')\tAuto-mock an entire module\tTop of file, before imports\njest.spyOn(obj, 'method')\tSpy on existing method without replacing\tjest.spyOn(console, 'error')\njest.useFakeTimers()\tControl setTimeout, setInterval, Date.now\tTesting debounce, polling, expiration\njest.advanceTimersByTime(ms)\tFast-forward fake timers\tjest.advanceTimersByTime(5000)\nexpect.objectContaining({})\tPartial object matching\tAssert subset of properties\nexpect.arrayContaining([])\tPartial array matching\tAssert array includes items\nexpect.any(Constructor)\tType matching\texpect.any(Number)\n.mockResolvedValue(val)\tMock async function return\tmock.mockResolvedValue({id: 1})\n.mockRejectedValue(err)\tMock async function throw\tmock.mockRejectedValue(new 
Error())\ntoMatchInlineSnapshot()\tInline snapshot for small outputs\tVerify exact structure in test file\nJavaScript / TypeScript -- Vitest\nimport { describe, it, expect, vi, beforeEach } from 'vitest';\nimport { calculateDiscount } from '../src/pricing';\n\ndescribe('calculateDiscount', () => {\n  it('applies percentage discount correctly', () => {\n    expect(calculateDiscount(100, { type: 'percentage', value: 20 })).toBe(80);\n  });\n\n  it('applies flat discount correctly', () => {\n    expect(calculateDiscount(100, { type: 'flat', value: 15 })).toBe(85);\n  });\n\n  it('never returns a negative price', () => {\n    expect(calculateDiscount(10, { type: 'flat', value: 50 })).toBe(0);\n  });\n\n  it('handles zero price gracefully', () => {\n    expect(calculateDiscount(0, { type: 'percentage', value: 50 })).toBe(0);\n  });\n\n  it('rounds to two decimal places for currency', () => {\n    const result = calculateDiscount(99.99, { type: 'percentage', value: 33 });\n    expect(result).toBe(66.99);\n    // Explicitly verify no floating point drift\n    expect(result.toString()).not.toContain('000000');\n  });\n\n  it('throws on negative discount value', () => {\n    expect(() => calculateDiscount(100, { type: 'percentage', value: -10 }))\n      .toThrow('Discount value must be non-negative');\n  });\n\n  it('throws on discount percentage above 100', () => {\n    expect(() => calculateDiscount(100, { type: 'percentage', value: 150 }))\n      .toThrow('Percentage discount cannot exceed 100');\n  });\n\n  it('throws on unknown discount type', () => {\n    expect(() => calculateDiscount(100, { type: 'bogo' as any, value: 1 }))\n      .toThrow(/unknown discount type/i);\n  });\n});\n\n\nVitest-specific notes:\n\nUse vi.fn() instead of jest.fn()\nUse vi.mock() instead of jest.mock()\nUse vi.spyOn() instead of jest.spyOn()\nUse vi.useFakeTimers() and vi.advanceTimersByTime()\nVitest supports ESM natively -- no need for --experimental-vm-modules\nUse vi.hoisted() for 
imports that need to be available during vi.mock() factory\nPython -- pytest\n\"\"\"Tests for user_service module.\"\"\"\nimport pytest\nfrom unittest.mock import AsyncMock, MagicMock, patch\nfrom datetime import datetime, timezone\n\nfrom app.services.user_service import UserService, UserNotFoundError, DuplicateEmailError\nfrom app.models.user import User\n\n\n@pytest.fixture\ndef mock_db():\n    \"\"\"Create a mock database session.\"\"\"\n    db = MagicMock()\n    db.commit = MagicMock()\n    db.rollback = MagicMock()\n    db.add = MagicMock()\n    db.query.return_value.filter.return_value.first.return_value = None\n    return db\n\n\n@pytest.fixture\ndef mock_email_client():\n    \"\"\"Create a mock email client.\"\"\"\n    client = AsyncMock()\n    client.send_welcome.return_value = True\n    return client\n\n\n@pytest.fixture\ndef user_service(mock_db, mock_email_client):\n    \"\"\"Create UserService with mocked dependencies.\"\"\"\n    return UserService(db=mock_db, email_client=mock_email_client)\n\n\n@pytest.fixture\ndef sample_user():\n    \"\"\"Create a sample user for testing.\"\"\"\n    return User(\n        id=1,\n        email=\"test@example.com\",\n        name=\"Test User\",\n        created_at=datetime(2026, 1, 1, tzinfo=timezone.utc),\n    )\n\n\nclass TestCreateUser:\n    \"\"\"Tests for UserService.create_user method.\"\"\"\n\n    def test_creates_user_with_valid_data(self, user_service, mock_db):\n        result = user_service.create_user(\n            email=\"new@example.com\",\n            password=\"secureP@ss123\",\n            name=\"New User\",\n        )\n\n        assert result.email == \"new@example.com\"\n        assert result.name == \"New User\"\n        mock_db.add.assert_called_once()\n        mock_db.commit.assert_called_once()\n\n    def test_raises_duplicate_email_error(self, user_service, mock_db, sample_user):\n        mock_db.query.return_value.filter.return_value.first.return_value = sample_user\n\n        with 
pytest.raises(DuplicateEmailError, match=\"already registered\"):\n            user_service.create_user(\n                email=\"test@example.com\",\n                password=\"secureP@ss123\",\n                name=\"Duplicate\",\n            )\n\n        mock_db.add.assert_not_called()\n\n    def test_rolls_back_on_commit_failure(self, user_service, mock_db):\n        mock_db.commit.side_effect = Exception(\"Connection lost\")\n\n        with pytest.raises(Exception, match=\"Connection lost\"):\n            user_service.create_user(\n                email=\"fail@example.com\",\n                password=\"secureP@ss123\",\n                name=\"Fail\",\n            )\n\n        mock_db.rollback.assert_called_once()\n\n    @pytest.mark.parametrize(\n        \"password,reason\",\n        [\n            (\"short\", \"too short\"),\n            (\"nouppercase1!\", \"missing uppercase\"),\n            (\"NOLOWERCASE1!\", \"missing lowercase\"),\n            (\"NoDigits!!\", \"missing digit\"),\n            (\"\", \"empty\"),\n        ],\n    )\n    def test_rejects_weak_passwords(self, user_service, password, reason):\n        with pytest.raises(ValueError):\n            user_service.create_user(\n                email=\"test@example.com\",\n                password=password,\n                name=\"Test\",\n            )\n\n    def test_strips_whitespace_from_email(self, user_service, mock_db):\n        result = user_service.create_user(\n            email=\"  spaces@example.com  \",\n            password=\"secureP@ss123\",\n            name=\"Spaces\",\n        )\n        assert result.email == \"spaces@example.com\"\n\n    def test_lowercases_email(self, user_service, mock_db):\n        result = user_service.create_user(\n            email=\"UPPER@Example.COM\",\n            password=\"secureP@ss123\",\n            name=\"Upper\",\n        )\n        assert result.email == \"upper@example.com\"\n\n\nclass TestGetUser:\n    \"\"\"Tests for UserService.get_user 
method.\"\"\"\n\n    def test_returns_user_when_found(self, user_service, mock_db, sample_user):\n        mock_db.query.return_value.filter.return_value.first.return_value = sample_user\n\n        result = user_service.get_user(user_id=1)\n\n        assert result.id == 1\n        assert result.email == \"test@example.com\"\n\n    def test_raises_not_found_for_missing_user(self, user_service, mock_db):\n        mock_db.query.return_value.filter.return_value.first.return_value = None\n\n        with pytest.raises(UserNotFoundError):\n            user_service.get_user(user_id=999)\n\n    def test_raises_value_error_for_invalid_id(self, user_service):\n        with pytest.raises(ValueError):\n            user_service.get_user(user_id=-1)\n\n        with pytest.raises(ValueError):\n            user_service.get_user(user_id=0)\n\n\npytest-specific patterns:\n\nPattern\tWhen to Use\tExample\n@pytest.fixture\tShared setup for multiple tests\tDatabase connections, test data\n@pytest.mark.parametrize\tSame test with different inputs\tTesting validation rules, edge cases\n@pytest.mark.asyncio\tTesting async functions\tasync def test_fetch():\n@pytest.mark.skip(reason=\"...\")\tTemporarily skip a test\tBroken dependency, known issue\n@pytest.mark.xfail\tTest expected to fail\tDocumenting a known bug\npytest.raises(ExceptionType)\tAssert exception is raised\twith pytest.raises(ValueError):\npytest.approx(value)\tFloating point comparison\tassert 0.3 == pytest.approx(0.1 + 0.2)\nMagicMock / AsyncMock\tMock sync/async dependencies\tmock = MagicMock(return_value=42)\n@patch('module.function')\tReplace function during test\t@patch('app.utils.send_email')\ntmp_path (built-in fixture)\tTemporary directory for file tests\tdef test_write(tmp_path):\ncapsys (built-in fixture)\tCapture stdout/stderr\tcaptured = capsys.readouterr()\nmonkeypatch (built-in fixture)\tSet env vars, modify objects\tmonkeypatch.setenv(\"API_KEY\", \"test\")\nconftest.py\tShare fixtures across test files\tPlace 
in test directory root\nGo -- testing package\npackage user_test\n\nimport (\n    \"context\"\n    \"errors\"\n    \"testing\"\n    \"time\"\n\n    \"github.com/stretchr/testify/assert\"\n    \"github.com/stretchr/testify/mock\"\n    \"github.com/stretchr/testify/require\"\n\n    \"myapp/internal/user\"\n)\n\n// MockUserStore implements user.Store interface for testing\ntype MockUserStore struct {\n    mock.Mock\n}\n\nfunc (m *MockUserStore) FindByEmail(ctx context.Context, email string) (*user.User, error) {\n    args := m.Called(ctx, email)\n    if args.Get(0) == nil {\n        return nil, args.Error(1)\n    }\n    return args.Get(0).(*user.User), args.Error(1)\n}\n\nfunc (m *MockUserStore) Create(ctx context.Context, u *user.User) error {\n    args := m.Called(ctx, u)\n    return args.Error(0)\n}\n\nfunc TestCreateUser(t *testing.T) {\n    t.Run(\"creates user with valid input\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        store.On(\"FindByEmail\", mock.Anything, \"new@example.com\").Return(nil, user.ErrNotFound)\n        store.On(\"Create\", mock.Anything, mock.AnythingOfType(\"*user.User\")).Return(nil)\n\n        u, err := svc.Create(context.Background(), \"new@example.com\", \"Test User\")\n\n        require.NoError(t, err)\n        assert.Equal(t, \"new@example.com\", u.Email)\n        assert.Equal(t, \"Test User\", u.Name)\n        assert.NotEmpty(t, u.ID)\n        store.AssertExpectations(t)\n    })\n\n    t.Run(\"returns error when email already exists\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        existing := &user.User{ID: \"123\", Email: \"taken@example.com\"}\n        store.On(\"FindByEmail\", mock.Anything, \"taken@example.com\").Return(existing, nil)\n\n        _, err := svc.Create(context.Background(), \"taken@example.com\", \"Test\")\n\n        require.Error(t, err)\n        assert.True(t, errors.Is(err, 
user.ErrDuplicateEmail))\n        store.AssertNotCalled(t, \"Create\", mock.Anything, mock.Anything)\n    })\n\n    t.Run(\"returns error on empty email\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        _, err := svc.Create(context.Background(), \"\", \"Test\")\n\n        require.Error(t, err)\n        assert.Contains(t, err.Error(), \"email is required\")\n    })\n\n    t.Run(\"respects context cancellation\", func(t *testing.T) {\n        store := new(MockUserStore)\n        svc := user.NewService(store)\n\n        ctx, cancel := context.WithCancel(context.Background())\n        cancel() // cancel immediately\n\n        store.On(\"FindByEmail\", mock.Anything, mock.Anything).Return(nil, ctx.Err())\n\n        _, err := svc.Create(ctx, \"test@example.com\", \"Test\")\n\n        require.Error(t, err)\n        assert.True(t, errors.Is(err, context.Canceled))\n    })\n}\n\n// Table-driven tests for validation\nfunc TestValidateEmail(t *testing.T) {\n    tests := []struct {\n        name    string\n        email   string\n        wantErr bool\n    }{\n        {\"valid email\", \"user@example.com\", false},\n        {\"valid with subdomain\", \"user@sub.example.com\", false},\n        {\"valid with plus\", \"user+tag@example.com\", false},\n        {\"empty string\", \"\", true},\n        {\"missing @\", \"userexample.com\", true},\n        {\"missing domain\", \"user@\", true},\n        {\"missing local part\", \"@example.com\", true},\n        {\"double @\", \"user@@example.com\", true},\n        {\"spaces in local\", \"us er@example.com\", true},\n        {\"unicode domain\", \"user@ex\\u00e4mple.com\", true},\n    }\n\n    for _, tt := range tests {\n        t.Run(tt.name, func(t *testing.T) {\n            err := user.ValidateEmail(tt.email)\n            if tt.wantErr {\n                assert.Error(t, err, \"expected error for email: %q\", tt.email)\n            } else {\n                assert.NoError(t, 
err, \"unexpected error for email: %q\", tt.email)\n            }\n        })\n    }\n}\n\n\nGo testing patterns:\n\nPattern\tWhen to Use\tExample\nt.Run(\"name\", func(t *testing.T){})\tSub-tests for grouping\tOrganize tests by scenario\nTable-driven tests\tSame logic, different inputs\tValidation, parsing, transformation\ntestify/assert\tNon-fatal assertions\tassert.Equal(t, expected, actual)\ntestify/require\tFatal assertions (stop test on failure)\trequire.NoError(t, err)\ntestify/mock\tInterface mocking\tDefine mock structs implementing interfaces\nhttptest.NewServer\tTest HTTP handlers\tCreate test server with real HTTP\nhttptest.NewRecorder\tTest handler without server\tRecord handler response\nt.Parallel()\tRun sub-tests in parallel\tPlace at start of sub-test\nt.Helper()\tMark function as test helper\tBetter error location in output\nt.Cleanup(func())\tRegister cleanup after test\tClose connections, remove temp files\ntesting.Short()\tSkip slow tests with -short\tif testing.Short() { t.Skip() }\nRust -- #[test] and #[cfg(test)]\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    // Test fixtures\n    fn sample_user() -> User {\n        User {\n            id: 1,\n            email: \"test@example.com\".to_string(),\n            name: \"Test User\".to_string(),\n            created_at: chrono::Utc::now(),\n        }\n    }\n\n    mod create_user {\n        use super::*;\n\n        #[test]\n        fn creates_user_with_valid_data() {\n            let mut repo = MockUserRepo::new();\n            repo.expect_find_by_email()\n                .returning(|_| Ok(None));\n            repo.expect_create()\n                .returning(|u| Ok(u.clone()));\n\n            let service = UserService::new(Box::new(repo));\n            let result = service.create_user(\"new@example.com\", \"secureP@ss123\", \"New User\");\n\n            assert!(result.is_ok());\n            let user = result.unwrap();\n            assert_eq!(user.email, \"new@example.com\");\n            
assert_eq!(user.name, \"New User\");\n        }\n\n        #[test]\n        fn returns_error_for_duplicate_email() {\n            let mut repo = MockUserRepo::new();\n            repo.expect_find_by_email()\n                .returning(|_| Ok(Some(sample_user())));\n\n            let service = UserService::new(Box::new(repo));\n            let result = service.create_user(\"test@example.com\", \"secureP@ss123\", \"Dup\");\n\n            assert!(result.is_err());\n            assert!(matches!(result.unwrap_err(), UserError::DuplicateEmail(_)));\n        }\n\n        #[test]\n        fn returns_error_for_empty_email() {\n            let repo = MockUserRepo::new();\n            let service = UserService::new(Box::new(repo));\n\n            let result = service.create_user(\"\", \"secureP@ss123\", \"Test\");\n\n            assert!(result.is_err());\n            assert!(matches!(result.unwrap_err(), UserError::ValidationError(_)));\n        }\n\n        #[test]\n        #[should_panic(expected = \"password must not be empty\")]\n        fn panics_on_empty_password() {\n            let repo = MockUserRepo::new();\n            let service = UserService::new(Box::new(repo));\n\n            // This should panic, not return an error\n            let _ = service.create_user(\"test@example.com\", \"\", \"Test\");\n        }\n    }\n\n    mod validate_email {\n        use super::*;\n\n        #[test]\n        fn accepts_valid_emails() {\n            let valid = vec![\n                \"user@example.com\",\n                \"user+tag@example.com\",\n                \"user.name@sub.example.com\",\n            ];\n            for email in valid {\n                assert!(validate_email(email).is_ok(), \"should accept: {}\", email);\n            }\n        }\n\n        #[test]\n        fn rejects_invalid_emails() {\n            let invalid = vec![\n                (\"\", \"empty string\"),\n                (\"@example.com\", \"missing local part\"),\n                (\"user@\", 
\"missing domain\"),\n                (\"userexample.com\", \"missing @\"),\n                (\"user@@example.com\", \"double @\"),\n            ];\n            for (email, reason) in invalid {\n                assert!(validate_email(email).is_err(), \"should reject ({}): {}\", reason, email);\n            }\n        }\n    }\n\n    // Async test (requires tokio::test)\n    mod async_operations {\n        use super::*;\n\n        #[tokio::test]\n        async fn fetches_user_from_remote_api() {\n            let mut mock_client = MockHttpClient::new();\n            mock_client.expect_get()\n                .with(eq(\"https://api.example.com/users/1\"))\n                .returning(|_| Ok(r#\"{\"id\":1,\"name\":\"Remote User\"}\"#.to_string()));\n\n            let service = RemoteUserService::new(mock_client);\n            let user = service.fetch_user(1).await.unwrap();\n\n            assert_eq!(user.name, \"Remote User\");\n        }\n\n        #[tokio::test]\n        async fn handles_api_timeout() {\n            let mut mock_client = MockHttpClient::new();\n            mock_client.expect_get()\n                .returning(|_| Err(HttpError::Timeout));\n\n            let service = RemoteUserService::new(mock_client);\n            let result = service.fetch_user(1).await;\n\n            assert!(matches!(result, Err(UserError::NetworkError(_))));\n        }\n    }\n}\n\n\nRust testing patterns:\n\nPattern\tWhen to Use\tExample\n#[test]\tMark a function as a test\tBasic unit test\n#[cfg(test)]\tCompile module only during testing\tWrap test module\n#[should_panic]\tTest that code panics\t#[should_panic(expected = \"msg\")]\n#[ignore]\tSkip test unless --ignored flag\tSlow or integration tests\n#[tokio::test]\tAsync test with tokio runtime\tAsync function testing\nassert!, assert_eq!, assert_ne!\tStandard assertions\tBuilt-in, no imports needed\nmatches!()\tPattern matching assertion\tassert!(matches!(result, Ok(_)))\nmockall crate\tGenerate mock 
implementations\t#[automock] on traits\nproptest / quickcheck\tProperty-based testing\tGenerate random inputs\nrstest\tParameterized tests (like pytest)\t#[rstest] with #[case]\ntempfile crate\tTemporary files and directories\ttempfile::tempdir()\nIntegration Test Patterns\n\nIntegration tests verify that multiple components work together correctly. They sit between unit tests (isolated) and end-to-end tests (full system).\n\nWhat to Integration Test\nBoundary\tWhat to Verify\nHTTP API\tRequest parsing, routing, response format, status codes, headers\nDatabase\tSchema compatibility, query correctness, transaction behavior, migrations\nFile system\tRead/write operations, path handling, permissions\nExternal APIs\tRequest format, response parsing, error handling, retry behavior\nMessage queues\tPublish/consume, message format, ordering, dead letter handling\nCache layer\tCache hit/miss, invalidation, serialization, TTL\nIntegration Test Structure\n1. Setup -- Create real or in-memory dependencies (test database, temp files)\n2. Seed -- Insert test data into the dependency\n3. Execute -- Call the code under test\n4. Assert -- Verify the result AND the side effects on the dependency\n5. 
Cleanup -- Tear down test data (or let the framework handle it)\n\nHTTP API Integration Test (Jest + Supertest)\nimport request from 'supertest';\nimport { app } from '../src/app';\nimport { db } from '../src/database';\n\ndescribe('POST /api/users', () => {\n  beforeAll(async () => {\n    await db.migrate.latest();\n  });\n\n  afterEach(async () => {\n    await db('users').truncate();\n  });\n\n  afterAll(async () => {\n    await db.destroy();\n  });\n\n  it('returns 201 and creates user in database', async () => {\n    const response = await request(app)\n      .post('/api/users')\n      .send({ email: 'test@example.com', password: 'Secure123!', name: 'Test' })\n      .expect(201);\n\n    expect(response.body.user.email).toBe('test@example.com');\n    expect(response.body.user).not.toHaveProperty('password');\n\n    // Verify side effect: user exists in database\n    const dbUser = await db('users').where({ email: 'test@example.com' }).first();\n    expect(dbUser).toBeDefined();\n    expect(dbUser.name).toBe('Test');\n  });\n\n  it('returns 409 when email already exists', async () => {\n    // Seed\n    await db('users').insert({ email: 'taken@example.com', password: 'hash', name: 'Existing' });\n\n    const response = await request(app)\n      .post('/api/users')\n      .send({ email: 'taken@example.com', password: 'Secure123!', name: 'Dup' })\n      .expect(409);\n\n    expect(response.body.error).toContain('already registered');\n  });\n\n  it('returns 400 with validation errors for missing fields', async () => {\n    const response = await request(app)\n      .post('/api/users')\n      .send({})\n      .expect(400);\n\n    expect(response.body.errors).toEqual(\n      expect.arrayContaining([\n        expect.objectContaining({ field: 'email' }),\n        expect.objectContaining({ field: 'password' }),\n      ])\n    );\n  });\n\n  it('returns 415 for non-JSON content type', async () => {\n    await request(app)\n      .post('/api/users')\n      
.set('Content-Type', 'text/plain')\n      .send('not json')\n      .expect(415);\n  });\n});\n\nDatabase Integration Test (pytest + SQLAlchemy)\nimport pytest\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom app.models import Base, User\nfrom app.repositories.user_repo import UserRepository\n\n\n@pytest.fixture(scope=\"module\")\ndef engine():\n    \"\"\"Create an in-memory SQLite engine for testing.\"\"\"\n    engine = create_engine(\"sqlite:///:memory:\")\n    Base.metadata.create_all(engine)\n    yield engine\n    engine.dispose()\n\n\n@pytest.fixture\ndef session(engine):\n    \"\"\"Create a new database session for each test.\"\"\"\n    Session = sessionmaker(bind=engine)\n    session = Session()\n    yield session\n    session.rollback()\n    session.close()\n\n\n@pytest.fixture\ndef repo(session):\n    return UserRepository(session)\n\n\nclass TestUserRepository:\n    def test_create_and_find(self, repo, session):\n        user = repo.create(email=\"test@example.com\", name=\"Test\")\n        session.flush()\n\n        found = repo.find_by_email(\"test@example.com\")\n        assert found is not None\n        assert found.name == \"Test\"\n        assert found.id == user.id\n\n    def test_find_returns_none_for_missing(self, repo):\n        assert repo.find_by_email(\"nonexistent@example.com\") is None\n\n    def test_unique_constraint_on_email(self, repo, session):\n        repo.create(email=\"unique@example.com\", name=\"First\")\n        session.flush()\n\n        with pytest.raises(Exception):  # IntegrityError\n            repo.create(email=\"unique@example.com\", name=\"Second\")\n            session.flush()\n\nCoverage-Driven Test Prioritization\n\nNot all code deserves equal testing effort. 
Prioritize based on risk.\n\nRisk Assessment Matrix\nFactor\tLow Risk\tMedium Risk\tHigh Risk\nData handling\tRead-only, display\tTransform, filter\tCreate, update, delete\nUser input\tNo user input\tValidated input\tRaw user input\nMoney\tNo financial impact\tReporting/display\tTransactions, billing\nExternal deps\tNone\tRead from external\tWrite to external\nFrequency\tRarely called\tPeriodic\tEvery request\nBlast radius\tSingle user\tTeam/organization\tAll users\n\nTest budget allocation:\n\nHigh risk code: 90%+ coverage, including edge cases and error paths\nMedium risk code: 70%+ coverage, happy path + main error cases\nLow risk code: 50%+ coverage, happy path only\nGenerated/boilerplate code: 0% (don't test framework code)\nWhat NOT to Test\n\nDo not waste time testing:\n\nFramework internals (React rendering, Express routing, Django ORM)\nThird-party library behavior (axios, lodash, numpy)\nSimple getters/setters with no logic\nConfiguration files\nType definitions or interfaces\nConstants and enums (unless derived from computation)\nCSS/styling (use visual regression tools instead)\nCode that is trivially correct by inspection\nSnapshot Testing Guidance\n\nSnapshots are useful for detecting unintended changes in structured output. They are NOT a substitute for behavioral assertions.\n\nGood uses for snapshots:\n\nAPI response shape verification (JSON structure, not values)\nReact component rendered output (JSX structure)\nError message format consistency\nCLI help text output\nGenerated SQL queries\nSerialized configuration\n\nBad uses for snapshots (avoid):\n\nTesting computed values (use expect(value).toBe(expected))\nTesting timestamps or random IDs (snapshots will always fail)\nTesting large objects where most properties are irrelevant\nAs a substitute for understanding what the code should produce\n\nSnapshot hygiene:\n\nReview every snapshot update in code review. 
Don't blindly --update.\nUse toMatchInlineSnapshot() for small outputs so the expected value lives in the test.\nUse .toMatchSnapshot() for large outputs, but name them: .toMatchSnapshot('user creation response').\nIf a snapshot file has more than 50 entries, your tests are probably too coupled to output format.\nPerformance Test Patterns\n\nPerformance tests verify that code meets speed and resource requirements.\n\nTiming Tests\n// Jest\nit('processes 10,000 records in under 500ms', () => {\n  const records = Array.from({ length: 10_000 }, (_, i) => ({ id: i, value: `item-${i}` }));\n\n  const start = performance.now();\n  const result = processRecords(records);\n  const elapsed = performance.now() - start;\n\n  expect(result).toHaveLength(10_000);\n  expect(elapsed).toBeLessThan(500);\n});\n\n# pytest\nimport time\n\ndef test_bulk_insert_performance(repo, session):\n    \"\"\"Bulk insert should handle 1000 records in under 2 seconds.\"\"\"\n    users = [{\"email\": f\"user{i}@example.com\", \"name\": f\"User {i}\"} for i in range(1000)]\n\n    start = time.monotonic()\n    repo.bulk_create(users)\n    session.flush()\n    elapsed = time.monotonic() - start\n\n    assert elapsed < 2.0, f\"Bulk insert took {elapsed:.2f}s, expected < 2.0s\"\n\n// Go\nfunc BenchmarkProcessRecords(b *testing.B) {\n    records := make([]Record, 10_000)\n    for i := range records {\n        records[i] = Record{ID: i, Value: fmt.Sprintf(\"item-%d\", i)}\n    }\n\n    b.ResetTimer()\n    for i := 0; i < b.N; i++ {\n        ProcessRecords(records)\n    }\n}\n\nMemory Usage Tests\nfunc TestMemoryUsage(t *testing.T) {\n    var m runtime.MemStats\n    runtime.ReadMemStats(&m)\n    before := m.Alloc\n\n    // Run the operation\n    result := ProcessLargeDataset(generateTestData(100_000))\n\n    runtime.ReadMemStats(&m)\n    after := m.Alloc\n\n    // Should not allocate more than 50MB for 100K records\n    allocatedMB := float64(after-before) / 1024 / 1024\n    assert.Less(t, allocatedMB, 
50.0, \"allocated %.2f MB, expected < 50 MB\", allocatedMB)\n    _ = result\n}\n\nOutput Format\n\nWhen generating tests, always produce complete, runnable test files. Include:\n\nAll necessary imports -- framework, mocks, module under test\nTest fixtures -- reusable setup data and helper functions\nOrganized test groups -- one describe/class per function or feature\nClear test names -- following the naming conventions above\nSpecific assertions -- not just toBeTruthy() or assert result\nEdge case coverage -- at minimum: empty input, boundary values, error paths\nComments only where the intent is non-obvious -- tests should be self-documenting via names\n\nFile naming conventions:\n\nFramework\tTest File Pattern\tLocation\nJest\t*.test.ts, *.spec.ts\t__tests__/ or next to source\nVitest\t*.test.ts, *.spec.ts\t__tests__/ or next to source\npytest\ttest_*.py, *_test.py\ttests/ directory\nGo\t*_test.go\tSame package as source\nRust\tmod tests block\tSame file as source\nComplete Workflow\n\nWhen a user gives you code to test, follow this exact process:\n\nRead the code -- understand what it does, its public API, its dependencies\nIdentify the framework -- detect or ask: Jest, Vitest, pytest, Go, Rust\nRun the strategy analysis -- public surface, complexity, coupling, mock plan\nGenerate the test file -- complete, runnable, with all imports and setup\nPrioritize coverage -- test high-risk paths first, skip trivial code\nList edge cases explicitly -- call out which edge cases you tested and which you skipped (and why)\nSuggest additional tests -- recommend integration tests, performance tests, or property-based tests if appropriate\n\nIf the code is too large to test in one file, split into logical test files and explain the structure.\n\nIf the code has no tests at all, start with the highest-risk function and work outward. 
Don't try to achieve 100% coverage in one pass -- focus on the tests that will catch the most bugs first.\n\n\"The purpose of testing is not to prove the code works. It's to find the places where it doesn't.\" -- Taylor (Sovereign AI)"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/ryudi84/sovereign-test-generator",
    "publisherUrl": "https://clawhub.ai/ryudi84/sovereign-test-generator",
    "owner": "ryudi84",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/sovereign-test-generator",
    "downloadUrl": "https://openagent3.xyz/downloads/sovereign-test-generator",
    "agentUrl": "https://openagent3.xyz/skills/sovereign-test-generator/agent",
    "manifestUrl": "https://openagent3.xyz/skills/sovereign-test-generator/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/sovereign-test-generator/agent.md"
  }
}