{
  "schemaVersion": "1.0",
  "item": {
    "slug": "clean-pytest",
    "name": "Clean Pytest",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/marcoracer/clean-pytest",
    "canonicalUrl": "https://clawhub.ai/marcoracer/clean-pytest",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/clean-pytest",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=clean-pytest",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "skill.json",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=clean-pytest",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=clean-pytest",
        "contentDisposition": "attachment; filename=\"clean-pytest-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/clean-pytest"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/clean-pytest",
    "agentPageUrl": "https://openagent3.xyz/skills/clean-pytest/agent",
    "manifestUrl": "https://openagent3.xyz/skills/clean-pytest/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/clean-pytest/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Clean Pytest",
        "body": "Clean, maintainable pytest test patterns using Fake-based testing, contract testing, and dependency injection. Focuses on test isolation, reusability, and clarity through explicit AAA pattern and well-structured fixtures."
      },
      {
        "title": "When to Use",
        "body": "Setting up test suites for Python/MCP projects\nCreating Fake implementations for external dependencies\nWriting contract tests for MCP tools/controllers\nImplementing test patterns with dependency injection\nTesting layered architectures (Controllers → Services → Repositories)\nWriting parametrized tests for multiple scenarios"
      },
      {
        "title": "1. Fakes over Mocks",
        "body": "Use Fake classes instead of mocking with unittest.mock. Fakes are in-memory implementations that mimic real dependencies without external calls.\n\nWhy Fakes?\n\nMore readable and maintainable\nEasier to debug\nBetter test isolation\nNo monkey-patching magic\nSelf-documenting behavior"
      },
      {
        "title": "2. Explicit AAA Pattern",
        "body": "Structure every test into three clear phases with comments:\n\n# Arrange\n# Set up test data and dependencies\n\n# Act\n# Execute the code under test\n\n# Assert\n# Verify the result"
      },
      {
        "title": "3. Dependency Injection in Fixtures",
        "body": "Inject dependencies between fixtures to maintain relationships and avoid duplication."
      },
      {
        "title": "4. Contract Testing",
        "body": "Verify that components register tools/functions correctly and pass expected arguments."
      },
      {
        "title": "Architecture Pattern",
        "body": "Controller (MCP Tools)\n    ↓\nService (Business Logic)\n    ↓\nRepository (Data Access)\n    ↓\nFake (Test Implementation)"
      },
      {
        "title": "Basic Fake Structure",
        "body": "Create a Fake class that implements the same interface as the real dependency:\n\n# tests/fakes.py\nfrom typing import Any, Dict, List, Optional\n\nclass FakeAuth:\n    \"\"\"Fake implementation of AuthProvider for testing.\"\"\"\n    def __init__(self) -> None:\n        self.created: List[Dict[str, Any]] = []\n        self.deleted: List[str] = []\n        self._seq = 0\n        self.fail_on_create: bool = False\n\n    def create_user(self, email: str, password: str, display_name: str) -> str:\n        if self.fail_on_create:\n            raise RuntimeError(\"create_user failed (fake)\")\n        self._seq += 1\n        uid = f\"uid-{self._seq}\"\n        rec = {\"uid\": uid, \"email\": email, \"display_name\": display_name}\n        self.created.append(rec)\n        return uid\n\n    def delete_user(self, uid: str) -> None:\n        self.deleted.append(uid)"
      },
      {
        "title": "Repository Fake",
        "body": "class FakeUsersRepo:\n    \"\"\"Fake implementation of UsersRepository.\"\"\"\n    def __init__(self) -> None:\n        self.users: Dict[str, Dict[str, Any]] = {}\n        self.fail_on_upsert: bool = False\n\n    def upsert_user_doc(self, uid: str, data: Dict[str, Any]) -> None:\n        if self.fail_on_upsert:\n            raise RuntimeError(\"upsert_user_doc failed (fake)\")\n        self.users[uid] = dict(data)\n\n    def list_users(self, limit: Optional[int] = None) -> List[Dict[str, Any]]:\n        items = list(self.users.values())\n        if limit and limit > 0:\n            items = items[:limit]\n        return [dict(it) for it in items]"
      },
      {
        "title": "Controlled Failure Fakes",
        "body": "class FakeAuth:\n    def __init__(self) -> None:\n        self.fail_on_create: bool = False  # Control failure in tests\n\n    def create_user(self, email: str, password: str, display_name: str) -> str:\n        if self.fail_on_create:\n            raise RuntimeError(\"create_user failed (fake)\")\n        # ... rest of implementation"
      },
      {
        "title": "Nested Repository Fakes",
        "body": "class FakeSectorsRepo:\n    def __init__(self, institutions: FakeInstitutionsRepo | None = None) -> None:\n        self.institutions = institutions  # Inject dependency\n        self.data: Dict[str, Dict[str, Dict[str, Any]]] = {}\n\n    def institution_exists(self, institution_id: str) -> bool:\n        return bool(self.institutions and institution_id in self.institutions.data)\n\n    def upsert_sector(self, institution_id: str, sector_id: str, data: Dict[str, Any]) -> None:\n        self.data.setdefault(institution_id, {})[sector_id] = dict(data)"
      },
      {
        "title": "Basic Fixture (conftest.py)",
        "body": "# tests/conftest.py\nimport pytest\nfrom tests.fakes import FakeAuth, FakeUsersRepo\n\n@pytest.fixture()\ndef fake_auth():\n    \"\"\"Provide a fresh FakeAuth for each test.\"\"\"\n    return FakeAuth()\n\n@pytest.fixture()\ndef fake_users_repo():\n    \"\"\"Provide a fresh FakeUsersRepo for each test.\"\"\"\n    return FakeUsersRepo()"
      },
      {
        "title": "Fixture with Dependency Injection",
        "body": "@pytest.fixture()\ndef fake_sectors_repo(fake_institutions_repo):\n    \"\"\"FakeSectorsRepo depends on FakeInstitutionsRepo.\"\"\"\n    return FakeSectorsRepo(institutions=fake_institutions_repo)\n\n@pytest.fixture()\ndef fake_rooms_repo(fake_sectors_repo):\n    \"\"\"FakeRoomsRepo depends on FakeSectorsRepo.\"\"\"\n    return FakeRoomsRepo(sectors=fake_sectors_repo)"
      },
      {
        "title": "Environment Fixture",
        "body": "@pytest.fixture()\ndef user_env(fake_auth, fake_users_repo):\n    \"\"\"Provide service and all dependencies for user operations.\"\"\"\n    from myapp.services.user_service import UserService\n    svc = UserService(fake_auth, fake_users_repo)\n    return svc, fake_auth, fake_users_repo"
      },
      {
        "title": "Seeded Environment Fixture",
        "body": "@pytest.fixture()\ndef user_env_seeded(user_env):\n    \"\"\"Environment with pre-seeded data.\"\"\"\n    svc, auth, repo = user_env\n    svc.add_user(email=\"test@example.com\", password=\"secret\", name=\"Test User\")\n    return svc"
      },
      {
        "title": "Fixture with Cleanup",
        "body": "@pytest.fixture()\ndef temp_file():\n    \"\"\"Provide a temporary file and clean up after test.\"\"\"\n    import tempfile\n    import os\n    fd, path = tempfile.mkstemp()\n    os.close(fd)\n    yield path\n    os.unlink(path)"
      },
      {
        "title": "Basic AAA Pattern Test",
        "body": "# tests/test_user_service.py\nimport pytest\nfrom myapp.services.user_service import UserService\n\ndef test_add_user_success(fake_auth, fake_users_repo):\n    # Arrange\n    svc = UserService(fake_auth, fake_users_repo)\n    email = \"test@example.com\"\n    password = \"secret\"\n    name = \"Test User\"\n\n    # Act\n    result = svc.add_user(email=email, password=password, name=name)\n\n    # Assert\n    assert result[\"status\"] == \"ok\"\n    assert result[\"user\"][\"email\"] == email\n    assert result[\"user\"][\"name\"] == name\n    assert result[\"uid\"] in fake_users_repo.users"
      },
      {
        "title": "Parametrized Tests",
        "body": "@pytest.mark.parametrize(\n    \"email,password,name,role\",\n    [\n        (\"a@example.com\", \"secret\", \"Alice\", \"admin\"),\n        (\"b@example.com\", \"p@ss\", \"Bob\", \"user\"),\n    ],\n)\ndef test_add_user_parametrized(user_env, email, password, name, role):\n    svc, _auth, _repo = user_env\n\n    # Act\n    res = svc.add_user(email=email, password=password, name=name, global_role=role)\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"user\"][\"email\"] == email\n    assert res[\"user\"][\"name\"] == name\n    assert res[\"user\"][\"globalRole\"] == role"
      },
      {
        "title": "Testing Error Scenarios with Fakes",
        "body": "@pytest.mark.parametrize(\"email\", [\"c@example.com\", \"d@example.com\"])\ndef test_add_user_rollback_on_firestore_failure(fake_auth, fake_users_repo, email):\n    # Arrange\n    fake_users_repo.fail_on_upsert = True\n    svc = UserService(fake_auth, fake_users_repo)\n\n    # Act & Assert\n    with pytest.raises(RuntimeError):\n        svc.add_user(email=email, password=\"secret\", name=\"Bob\")\n\n    # Assert rollback\n    assert fake_auth.deleted, \"Expected auth user to be deleted on Firestore failure\""
      },
      {
        "title": "Testing Timestamp Normalization",
        "body": "def test_list_users_normalizes_timestamps_to_iso(user_env):\n    # Arrange\n    svc, _auth, repo = user_env\n    from datetime import datetime\n    repo.users[\"u1\"] = {\n        \"id\": \"u1\",\n        \"email\": \"x@y.z\",\n        \"name\": \"X\",\n        \"globalRole\": \"user\",\n        \"createdAt\": datetime(2024, 1, 1),\n        \"updatedAt\": datetime(2024, 1, 2),\n    }\n\n    # Act\n    res = svc.list_users(limit=10)\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"count\"] == 1\n    user = res[\"users\"][0]\n    assert isinstance(user[\"createdAt\"], str)\n    assert isinstance(user[\"updatedAt\"], str)"
      },
      {
        "title": "MCP Tool Registration Contract",
        "body": "Test that controllers properly register tools with expected signatures:\n\n# tests/test_controllers_contract.py\nfrom typing import Any, Callable, Dict\n\nclass FakeMCP:\n    \"\"\"Minimal FakeMCP for contract testing.\"\"\"\n    def __init__(self) -> None:\n        self.tools: Dict[str, Callable[..., Any]] = {}\n        self.meta: Dict[str, Dict[str, Any]] = {}\n\n    def tool(self, name: str, description: str, tags: Optional[set] = None, meta: Optional[dict] = None):\n        def decorator(fn: Callable[..., Any]):\n            self.tools[name] = fn\n            self.meta[name] = {\n                \"description\": description,\n                \"tags\": set(tags or set()),\n                \"meta\": dict(meta or {}),\n            }\n            return fn\n        return decorator\n\n\nclass FakeUserService:\n    \"\"\"Simple fake service that records calls.\"\"\"\n    def __init__(self):\n        self.calls = []\n\n    def add_user(self, **kwargs):\n        self.calls.append((\"add_user\", kwargs))\n        return {\"status\": \"ok\", \"op\": \"add_user\", \"args\": kwargs}\n\n\ndef test_users_controller_contract():\n    # Arrange\n    from myapp.controllers.users_controller import UsersController\n    fake = FakeMCP()\n    svc = FakeUserService()\n    UsersController(fake, svc)\n\n    # Assert tool registration\n    assert \"add_user\" in fake.tools\n    assert \"list_users\" in fake.tools\n\n    # Act & Assert tool behavior\n    res = fake.tools[\"add_user\"](\n        email=\"a@x.y\", password=\"s3cr3t\", name=\"Alice\", global_role=\"admin\"\n    )\n    assert res[\"status\"] == \"ok\"\n    assert res[\"op\"] == \"add_user\"\n    assert res[\"args\"][\"email\"] == \"a@x.y\""
      },
      {
        "title": "Parametrized Contract Tests",
        "body": "@pytest.mark.parametrize(\n    \"email,password,name,role\",\n    [\n        (\"a@x.y\", \"s3cr3t\", \"Alice\", \"admin\"),\n        (\"b@x.y\", \"p@ssw0rd\", \"Bob\", \"user\"),\n    ],\n)\ndef test_users_add_user_parametrized(_users_env, email, password, name, role):\n    # Arrange\n    fake, _ = _users_env\n\n    # Act\n    res = fake.tools[\"add_user\"](\n        email=email, password=password, name=name, global_role=role\n    )\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"op\"] == \"add_user\"\n    assert res[\"args\"][\"email\"] == email"
      },
      {
        "title": "Testing Repository Operations",
        "body": "@pytest.fixture()\ndef repo_env(fake_institutions_repo, fake_sectors_repo):\n    # Seed data\n    fake_institutions_repo.upsert(\"inst1\", {\"id\": \"inst1\", \"name\": \"Inst One\"})\n    fake_sectors_repo.upsert_sector(\n        \"inst1\", \"er\", {\"id\": \"er\", \"name\": \"ER\", \"slug\": \"er\", \"isActive\": True}\n    )\n    return fake_sectors_repo"
      },
      {
        "title": "Testing Multiple Data Scenarios",
        "body": "@pytest.mark.parametrize(\"rooms\", [\n    [\"101\"],\n    [\"201\", {\"name\": \"102\", \"id\": \"room-102\"}],\n])\ndef test_add_and_list_rooms(room_env, rooms):\n    svc, _ = room_env\n\n    # Act\n    res = svc.add_sector_rooms(\"inst1\", \"er\", rooms)\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"count\"] == len(rooms)\n\n    lst = svc.list_sector_rooms(\"inst1\", \"er\", limit=10)\n    assert lst[\"status\"] == \"ok\"\n    assert lst[\"count\"] == len(rooms)"
      },
      {
        "title": "Testing Limit Behavior",
        "body": "@pytest.mark.parametrize(\"limit\", [1, 3])\ndef test_list_rooms_limits(room_env_seeded, limit):\n    svc = room_env_seeded\n\n    # Act\n    lst = svc.list_sector_rooms(\"inst1\", \"er\", limit=limit)\n\n    # Assert\n    assert lst[\"status\"] == \"ok\"\n    assert lst[\"count\"] == min(2, limit)  # 2 items seeded"
      },
      {
        "title": "Testing Not Found Scenarios",
        "body": "@pytest.mark.parametrize(\"room_id,deleted\", [\n    (\"room-102\", True),\n    (\"room-999\", False),\n])\ndef test_remove_rooms_parametrized(room_env_seeded, room_id, deleted):\n    svc = room_env_seeded\n\n    # Act\n    res = svc.remove_sector_room(\"inst1\", \"er\", room_id)\n\n    # Assert\n    assert res[\"deleted\"] is deleted\n    if not deleted:\n        assert res.get(\"reason\") == \"room_not_found\""
      },
      {
        "title": "Conditional Integration Tests",
        "body": "Skip integration tests when external dependencies are not available:\n\n# tests/test_integration_wiring.py\nimport os\nimport pytest\n\n# Gate this integration test on presence of credentials\n_ENV_KEYS = (\n    \"FIREBASE_SERVICE_ACCOUNT\",\n    \"GOOGLE_APPLICATION_CREDENTIALS\",\n)\n_has_env_creds = any(os.getenv(k) for k in _ENV_KEYS)\n\npytestmark = [\n    pytest.mark.integration,\n    pytest.mark.skipif(\n        not _has_env_creds,\n        reason=(\n            \"Integration test requires Firebase Admin credentials via env \"\n            \"(FIREBASE_SERVICE_ACCOUNT or GOOGLE_APPLICATION_CREDENTIALS)\"\n        ),\n    ),\n]\n\n@pytest.mark.integration\ndef test_build_app_initializes_and_registers_tools():\n    # Arrange\n    from myapp.wiring import build_app\n\n    # Act\n    app = build_app()\n\n    # Assert\n    assert hasattr(app, \"run\")"
      },
      {
        "title": "Test Isolation",
        "body": "Each test should be independent and not share state:\n\ndef test_user_created_in_one_test_not_visible_in_another(fake_auth, fake_users_repo):\n    # Arrange\n    svc1 = UserService(fake_auth, fake_users_repo)\n\n    # Act\n    result1 = svc1.add_user(email=\"test1@example.com\", password=\"secret\", name=\"User1\")\n\n    # Assert - second test with fresh fixtures should not see this user\n    svc2 = UserService(fake_auth, fake_users_repo)\n    users = svc2.list_users()\n    assert users[\"count\"] == 1  # Only the user from this test"
      },
      {
        "title": "Don't Mock What You Don't Own",
        "body": "❌ Bad - Mocking external library:\n\n@patch('firebase_admin.auth.create_user')\ndef test_add_user(mock_create_user):\n    mock_create_user.return_value = Mock(uid=\"uid-1\")\n    # ... test code\n\n✅ Good - Use Fake for your interface:\n\ndef test_add_user(fake_auth, fake_users_repo):\n    svc = UserService(fake_auth, fake_users_repo)\n    # ... test code"
      },
      {
        "title": "Don't Test Implementation Details",
        "body": "❌ Bad - Testing internal method calls:\n\ndef test_add_user(fake_auth, fake_users_repo):\n    svc = UserService(fake_auth, fake_users_repo)\n    svc.add_user(email=\"test@example.com\", password=\"secret\", name=\"User\")\n    assert fake_auth.created == [{\"uid\": \"uid-1\", ...}]  # Implementation detail\n\n✅ Good - Testing observable behavior:\n\ndef test_add_user(fake_auth, fake_users_repo):\n    svc = UserService(fake_auth, fake_users_repo)\n    result = svc.add_user(email=\"test@example.com\", password=\"secret\", name=\"User\")\n    assert result[\"status\"] == \"ok\"\n    assert result[\"user\"][\"email\"] == \"test@example.com\""
      },
      {
        "title": "Don't Skip Error Paths",
        "body": "❌ Bad - Only happy path:\n\ndef test_add_user_success(fake_auth, fake_users_repo):\n    # Only tests success case\n\n✅ Good - Test all scenarios:\n\ndef test_add_user_success(fake_auth, fake_users_repo):\n    # Happy path\n\ndef test_add_user_rollback_on_firestore_failure(fake_auth, fake_users_repo):\n    # Error path\n\ndef test_add_user_handles_duplicate_email(fake_auth, fake_users_repo):\n    # Edge case"
      },
      {
        "title": "Running Tests",
        "body": "# Run all tests\npytest\n\n# Run with coverage\npytest --cov=myapp --cov-report=term-missing\n\n# Run specific test file\npytest tests/test_user_service.py\n\n# Run specific test\npytest tests/test_user_service.py::test_add_user_success\n\n# Run parametrized tests with verbose output\npytest -v tests/test_user_service.py::test_add_user_parametrized\n\n# Skip integration tests\npytest -m \"not integration\"\n\n# Run only integration tests\npytest -m integration\n\n# Stop on first failure\npytest -x\n\n# Show local variables on failure\npytest -l\n\n# Run tests in parallel (with pytest-xdist)\npytest -n auto"
      },
      {
        "title": "Best Practices Checklist",
        "body": "Use Fake classes instead of unittest.mock\n Structure tests with explicit AAA comments\n Use fixtures for test setup\n Inject dependencies between fixtures\n Parametrize tests for multiple scenarios\n Test happy paths and error paths\n Test edge cases and boundaries\n Write contract tests for interfaces\n Ensure test isolation\n Use descriptive test names\n Keep tests focused on one behavior\n Avoid testing implementation details\n Test at appropriate level (unit vs integration)\n Mock external dependencies appropriately\n Maintain test coverage"
      }
    ],
    "body": "Clean Pytest\n\nClean, maintainable pytest test patterns using Fake-based testing, contract testing, and dependency injection. Focuses on test isolation, reusability, and clarity through explicit AAA pattern and well-structured fixtures.\n\nWhen to Use\nSetting up test suites for Python/MCP projects\nCreating Fake implementations for external dependencies\nWriting contract tests for MCP tools/controllers\nImplementing test patterns with dependency injection\nTesting layered architectures (Controllers → Services → Repositories)\nWriting parametrized tests for multiple scenarios\nCore Principles\n1. Fakes over Mocks\n\nUse Fake classes instead of mocking with unittest.mock. Fakes are in-memory implementations that mimic real dependencies without external calls.\n\nWhy Fakes?\n\nMore readable and maintainable\nEasier to debug\nBetter test isolation\nNo monkey-patching magic\nSelf-documenting behavior\n2. Explicit AAA Pattern\n\nStructure every test into three clear phases with comments:\n\n# Arrange\n# Set up test data and dependencies\n\n# Act\n# Execute the code under test\n\n# Assert\n# Verify the result\n\n3. Dependency Injection in Fixtures\n\nInject dependencies between fixtures to maintain relationships and avoid duplication.\n\n4. 
Contract Testing\n\nVerify that components register tools/functions correctly and pass expected arguments.\n\nArchitecture Pattern\nController (MCP Tools)\n    ↓\nService (Business Logic)\n    ↓\nRepository (Data Access)\n    ↓\nFake (Test Implementation)\n\nCreating Fakes\nBasic Fake Structure\n\nCreate a Fake class that implements the same interface as the real dependency:\n\n# tests/fakes.py\nfrom typing import Any, Dict, List, Optional\n\nclass FakeAuth:\n    \"\"\"Fake implementation of AuthProvider for testing.\"\"\"\n    def __init__(self) -> None:\n        self.created: List[Dict[str, Any]] = []\n        self.deleted: List[str] = []\n        self._seq = 0\n        self.fail_on_create: bool = False\n\n    def create_user(self, email: str, password: str, display_name: str) -> str:\n        if self.fail_on_create:\n            raise RuntimeError(\"create_user failed (fake)\")\n        self._seq += 1\n        uid = f\"uid-{self._seq}\"\n        rec = {\"uid\": uid, \"email\": email, \"display_name\": display_name}\n        self.created.append(rec)\n        return uid\n\n    def delete_user(self, uid: str) -> None:\n        self.deleted.append(uid)\n\nRepository Fake\nclass FakeUsersRepo:\n    \"\"\"Fake implementation of UsersRepository.\"\"\"\n    def __init__(self) -> None:\n        self.users: Dict[str, Dict[str, Any]] = {}\n        self.fail_on_upsert: bool = False\n\n    def upsert_user_doc(self, uid: str, data: Dict[str, Any]) -> None:\n        if self.fail_on_upsert:\n            raise RuntimeError(\"upsert_user_doc failed (fake)\")\n        self.users[uid] = dict(data)\n\n    def list_users(self, limit: Optional[int] = None) -> List[Dict[str, Any]]:\n        items = list(self.users.values())\n        if limit and limit > 0:\n            items = items[:limit]\n        return [dict(it) for it in items]\n\nControlled Failure Fakes\nclass FakeAuth:\n    def __init__(self) -> None:\n        self.fail_on_create: bool = False  # Control failure in tests\n\n    
def create_user(self, email: str, password: str, display_name: str) -> str:\n        if self.fail_on_create:\n            raise RuntimeError(\"create_user failed (fake)\")\n        # ... rest of implementation\n\nNested Repository Fakes\nclass FakeSectorsRepo:\n    def __init__(self, institutions: FakeInstitutionsRepo | None = None) -> None:\n        self.institutions = institutions  # Inject dependency\n        self.data: Dict[str, Dict[str, Dict[str, Any]]] = {}\n\n    def institution_exists(self, institution_id: str) -> bool:\n        return bool(self.institutions and institution_id in self.institutions.data)\n\n    def upsert_sector(self, institution_id: str, sector_id: str, data: Dict[str, Any]) -> None:\n        self.data.setdefault(institution_id, {})[sector_id] = dict(data)\n\nFixtures\nBasic Fixture (conftest.py)\n# tests/conftest.py\nimport pytest\nfrom tests.fakes import FakeAuth, FakeUsersRepo\n\n@pytest.fixture()\ndef fake_auth():\n    \"\"\"Provide a fresh FakeAuth for each test.\"\"\"\n    return FakeAuth()\n\n@pytest.fixture()\ndef fake_users_repo():\n    \"\"\"Provide a fresh FakeUsersRepo for each test.\"\"\"\n    return FakeUsersRepo()\n\nFixture with Dependency Injection\n@pytest.fixture()\ndef fake_sectors_repo(fake_institutions_repo):\n    \"\"\"FakeSectorsRepo depends on FakeInstitutionsRepo.\"\"\"\n    return FakeSectorsRepo(institutions=fake_institutions_repo)\n\n@pytest.fixture()\ndef fake_rooms_repo(fake_sectors_repo):\n    \"\"\"FakeRoomsRepo depends on FakeSectorsRepo.\"\"\"\n    return FakeRoomsRepo(sectors=fake_sectors_repo)\n\nEnvironment Fixture\n@pytest.fixture()\ndef user_env(fake_auth, fake_users_repo):\n    \"\"\"Provide service and all dependencies for user operations.\"\"\"\n    from myapp.services.user_service import UserService\n    svc = UserService(fake_auth, fake_users_repo)\n    return svc, fake_auth, fake_users_repo\n\nSeeded Environment Fixture\n@pytest.fixture()\ndef user_env_seeded(user_env):\n    \"\"\"Environment 
with pre-seeded data.\"\"\"\n    svc, auth, repo = user_env\n    svc.add_user(email=\"test@example.com\", password=\"secret\", name=\"Test User\")\n    return svc\n\nFixture with Cleanup\n@pytest.fixture()\ndef temp_file():\n    \"\"\"Provide a temporary file and clean up after test.\"\"\"\n    import tempfile\n    import os\n    fd, path = tempfile.mkstemp()\n    os.close(fd)\n    yield path\n    os.unlink(path)\n\nService Layer Testing\nBasic AAA Pattern Test\n# tests/test_user_service.py\nimport pytest\nfrom myapp.services.user_service import UserService\n\ndef test_add_user_success(fake_auth, fake_users_repo):\n    # Arrange\n    svc = UserService(fake_auth, fake_users_repo)\n    email = \"test@example.com\"\n    password = \"secret\"\n    name = \"Test User\"\n\n    # Act\n    result = svc.add_user(email=email, password=password, name=name)\n\n    # Assert\n    assert result[\"status\"] == \"ok\"\n    assert result[\"user\"][\"email\"] == email\n    assert result[\"user\"][\"name\"] == name\n    assert result[\"uid\"] in fake_users_repo.users\n\nParametrized Tests\n@pytest.mark.parametrize(\n    \"email,password,name,role\",\n    [\n        (\"a@example.com\", \"secret\", \"Alice\", \"admin\"),\n        (\"b@example.com\", \"p@ss\", \"Bob\", \"user\"),\n    ],\n)\ndef test_add_user_parametrized(user_env, email, password, name, role):\n    svc, _auth, _repo = user_env\n\n    # Act\n    res = svc.add_user(email=email, password=password, name=name, global_role=role)\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"user\"][\"email\"] == email\n    assert res[\"user\"][\"name\"] == name\n    assert res[\"user\"][\"globalRole\"] == role\n\nTesting Error Scenarios with Fakes\n@pytest.mark.parametrize(\"email\", [\"c@example.com\", \"d@example.com\"])\ndef test_add_user_rollback_on_firestore_failure(fake_auth, fake_users_repo, email):\n    # Arrange\n    fake_users_repo.fail_on_upsert = True\n    svc = UserService(fake_auth, fake_users_repo)\n\n 
   # Act & Assert\n    with pytest.raises(RuntimeError):\n        svc.add_user(email=email, password=\"secret\", name=\"Bob\")\n\n    # Assert rollback\n    assert fake_auth.deleted, \"Expected auth user to be deleted on Firestore failure\"\n\nTesting Timestamp Normalization\ndef test_list_users_normalizes_timestamps_to_iso(user_env):\n    # Arrange\n    svc, _auth, repo = user_env\n    from datetime import datetime\n    repo.users[\"u1\"] = {\n        \"id\": \"u1\",\n        \"email\": \"x@y.z\",\n        \"name\": \"X\",\n        \"globalRole\": \"user\",\n        \"createdAt\": datetime(2024, 1, 1),\n        \"updatedAt\": datetime(2024, 1, 2),\n    }\n\n    # Act\n    res = svc.list_users(limit=10)\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"count\"] == 1\n    user = res[\"users\"][0]\n    assert isinstance(user[\"createdAt\"], str)\n    assert isinstance(user[\"updatedAt\"], str)\n\nContract Testing\nMCP Tool Registration Contract\n\nTest that controllers properly register tools with expected signatures:\n\n# tests/test_controllers_contract.py\nfrom typing import Any, Callable, Dict, Optional\n\nclass FakeMCP:\n    \"\"\"Minimal FakeMCP for contract testing.\"\"\"\n    def __init__(self) -> None:\n        self.tools: Dict[str, Callable[..., Any]] = {}\n        self.meta: Dict[str, Dict[str, Any]] = {}\n\n    def tool(self, name: str, description: str, tags: Optional[set] = None, meta: Optional[dict] = None):\n        def decorator(fn: Callable[..., Any]):\n            self.tools[name] = fn\n            self.meta[name] = {\n                \"description\": description,\n                \"tags\": set(tags or set()),\n                \"meta\": dict(meta or {}),\n            }\n            return fn\n        return decorator\n\n\nclass FakeUserService:\n    \"\"\"Simple fake service that records calls.\"\"\"\n    def __init__(self):\n        self.calls = []\n\n    def add_user(self, **kwargs):\n        self.calls.append((\"add_user\", kwargs))\n 
       return {\"status\": \"ok\", \"op\": \"add_user\", \"args\": kwargs}\n\n\ndef test_users_controller_contract():\n    # Arrange\n    from myapp.controllers.users_controller import UsersController\n    fake = FakeMCP()\n    svc = FakeUserService()\n    UsersController(fake, svc)\n\n    # Assert tool registration\n    assert \"add_user\" in fake.tools\n    assert \"list_users\" in fake.tools\n\n    # Act & Assert tool behavior\n    res = fake.tools[\"add_user\"](\n        email=\"a@x.y\", password=\"s3cr3t\", name=\"Alice\", global_role=\"admin\"\n    )\n    assert res[\"status\"] == \"ok\"\n    assert res[\"op\"] == \"add_user\"\n    assert res[\"args\"][\"email\"] == \"a@x.y\"\n\nParametrized Contract Tests\n@pytest.mark.parametrize(\n    \"email,password,name,role\",\n    [\n        (\"a@x.y\", \"s3cr3t\", \"Alice\", \"admin\"),\n        (\"b@x.y\", \"p@ssw0rd\", \"Bob\", \"user\"),\n    ],\n)\ndef test_users_add_user_parametrized(_users_env, email, password, name, role):\n    # Arrange\n    fake, _ = _users_env\n\n    # Act\n    res = fake.tools[\"add_user\"](\n        email=email, password=password, name=name, global_role=role\n    )\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"op\"] == \"add_user\"\n    assert res[\"args\"][\"email\"] == email\n\nRepository Layer Testing\nTesting Repository Operations\n@pytest.fixture()\ndef repo_env(fake_institutions_repo, fake_sectors_repo):\n    # Seed data\n    fake_institutions_repo.upsert(\"inst1\", {\"id\": \"inst1\", \"name\": \"Inst One\"})\n    fake_sectors_repo.upsert_sector(\n        \"inst1\", \"er\", {\"id\": \"er\", \"name\": \"ER\", \"slug\": \"er\", \"isActive\": True}\n    )\n    return fake_sectors_repo\n\nTesting Multiple Data Scenarios\n@pytest.mark.parametrize(\"rooms\", [\n    [\"101\"],\n    [\"201\", {\"name\": \"102\", \"id\": \"room-102\"}],\n])\ndef test_add_and_list_rooms(room_env, rooms):\n    svc, _ = room_env\n\n    # Act\n    res = svc.add_sector_rooms(\"inst1\", 
\"er\", rooms)\n\n    # Assert\n    assert res[\"status\"] == \"ok\"\n    assert res[\"count\"] == len(rooms)\n\n    lst = svc.list_sector_rooms(\"inst1\", \"er\", limit=10)\n    assert lst[\"status\"] == \"ok\"\n    assert lst[\"count\"] == len(rooms)\n\nTesting Limit Behavior\n@pytest.mark.parametrize(\"limit\", [1, 3])\ndef test_list_rooms_limits(room_env_seeded, limit):\n    svc = room_env_seeded\n\n    # Act\n    lst = svc.list_sector_rooms(\"inst1\", \"er\", limit=limit)\n\n    # Assert\n    assert lst[\"status\"] == \"ok\"\n    assert lst[\"count\"] == min(2, limit)  # 2 items seeded\n\nTesting Not Found Scenarios\n@pytest.mark.parametrize(\"room_id,deleted\", [\n    (\"room-102\", True),\n    (\"room-999\", False),\n])\ndef test_remove_rooms_parametrized(room_env_seeded, room_id, deleted):\n    svc = room_env_seeded\n\n    # Act\n    res = svc.remove_sector_room(\"inst1\", \"er\", room_id)\n\n    # Assert\n    assert res[\"deleted\"] is deleted\n    if not deleted:\n        assert res.get(\"reason\") == \"room_not_found\"\n\nIntegration Testing\nConditional Integration Tests\n\nSkip integration tests when external dependencies are not available:\n\n# tests/test_integration_wiring.py\nimport os\nimport pytest\n\n# Gate this integration test on presence of credentials\n_ENV_KEYS = (\n    \"FIREBASE_SERVICE_ACCOUNT\",\n    \"GOOGLE_APPLICATION_CREDENTIALS\",\n)\n_has_env_creds = any(os.getenv(k) for k in _ENV_KEYS)\n\npytestmark = [\n    pytest.mark.integration,\n    pytest.mark.skipif(\n        not _has_env_creds,\n        reason=(\n            \"Integration test requires Firebase Admin credentials via env \"\n            \"(FIREBASE_SERVICE_ACCOUNT or GOOGLE_APPLICATION_CREDENTIALS)\"\n        ),\n    ),\n]\n\n@pytest.mark.integration\ndef test_build_app_initializes_and_registers_tools():\n    # Arrange\n    from myapp.wiring import build_app\n\n    # Act\n    app = build_app()\n\n    # Assert\n    assert hasattr(app, \"run\")\n\nTest Isolation\n\nEach test 
should be independent and not share state:\n\ndef test_user_created_in_one_test_not_visible_in_another(fake_auth, fake_users_repo):\n    # Arrange\n    svc1 = UserService(fake_auth, fake_users_repo)\n\n    # Act\n    result1 = svc1.add_user(email=\"test1@example.com\", password=\"secret\", name=\"User1\")\n\n    # Assert - second test with fresh fixtures should not see this user\n    svc2 = UserService(fake_auth, fake_users_repo)\n    users = svc2.list_users()\n    assert users[\"count\"] == 1  # Only the user from this test\n\nTesting Anti-Patterns to Avoid\nDon't Mock What You Don't Own\n\n❌ Bad - Mocking external library:\n\n@patch('firebase_admin.auth.create_user')\ndef test_add_user(mock_create_user):\n    mock_create_user.return_value = Mock(uid=\"uid-1\")\n    # ... test code\n\n\n✅ Good - Use Fake for your interface:\n\ndef test_add_user(fake_auth, fake_users_repo):\n    svc = UserService(fake_auth, fake_users_repo)\n    # ... test code\n\nDon't Test Implementation Details\n\n❌ Bad - Testing internal method calls:\n\ndef test_add_user(fake_auth, fake_users_repo):\n    svc = UserService(fake_auth, fake_users_repo)\n    svc.add_user(email=\"test@example.com\", password=\"secret\", name=\"User\")\n    assert fake_auth.created == [{\"uid\": \"uid-1\", ...}]  # Implementation detail\n\n\n✅ Good - Testing observable behavior:\n\ndef test_add_user(fake_auth, fake_users_repo):\n    svc = UserService(fake_auth, fake_users_repo)\n    result = svc.add_user(email=\"test@example.com\", password=\"secret\", name=\"User\")\n    assert result[\"status\"] == \"ok\"\n    assert result[\"user\"][\"email\"] == \"test@example.com\"\n\nDon't Skip Error Paths\n\n❌ Bad - Only happy path:\n\ndef test_add_user_success(fake_auth, fake_users_repo):\n    # Only tests success case\n\n\n✅ Good - Test all scenarios:\n\ndef test_add_user_success(fake_auth, fake_users_repo):\n    # Happy path\n\ndef test_add_user_rollback_on_firestore_failure(fake_auth, fake_users_repo):\n    # Error 
path\n\ndef test_add_user_handles_duplicate_email(fake_auth, fake_users_repo):\n    # Edge case\n\nRunning Tests\n# Run all tests\npytest\n\n# Run with coverage\npytest --cov=myapp --cov-report=term-missing\n\n# Run specific test file\npytest tests/test_user_service.py\n\n# Run specific test\npytest tests/test_user_service.py::test_add_user_success\n\n# Run parametrized tests with verbose output\npytest -v tests/test_user_service.py::test_add_user_parametrized\n\n# Skip integration tests\npytest -m \"not integration\"\n\n# Run only integration tests\npytest -m integration\n\n# Stop on first failure\npytest -x\n\n# Show local variables on failure\npytest -l\n\n# Run tests in parallel (with pytest-xdist)\npytest -n auto\n\nBest Practices Checklist\n Use Fake classes instead of unittest.mock\n Structure tests with explicit AAA comments\n Use fixtures for test setup\n Inject dependencies between fixtures\n Parametrize tests for multiple scenarios\n Test happy paths and error paths\n Test edge cases and boundaries\n Write contract tests for interfaces\n Ensure test isolation\n Use descriptive test names\n Keep tests focused on one behavior\n Avoid testing implementation details\n Test at appropriate level (unit vs integration)\n Mock external dependencies appropriately\n Maintain test coverage"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/marcoracer/clean-pytest",
    "publisherUrl": "https://clawhub.ai/marcoracer/clean-pytest",
    "owner": "marcoracer",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/clean-pytest",
    "downloadUrl": "https://openagent3.xyz/downloads/clean-pytest",
    "agentUrl": "https://openagent3.xyz/skills/clean-pytest/agent",
    "manifestUrl": "https://openagent3.xyz/skills/clean-pytest/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/clean-pytest/agent.md"
  }
}