{
  "schemaVersion": "1.0",
  "item": {
    "slug": "backend-event-stores",
    "name": "Backend Event Stores",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/wpank/backend-event-stores",
    "canonicalUrl": "https://clawhub.ai/wpank/backend-event-stores",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/backend-event-stores",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=backend-event-stores",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
        "contentDisposition": "attachment; filename=\"4claw-imageboard-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/backend-event-stores"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/backend-event-stores",
    "agentPageUrl": "https://openagent3.xyz/skills/backend-event-stores/agent",
    "manifestUrl": "https://openagent3.xyz/skills/backend-event-stores/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/backend-event-stores/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Event Store",
        "body": "Guide to designing event stores for event-sourced applications — covering event schemas, projections, snapshotting, and CQRS integration."
      },
      {
        "title": "When to Use This Skill",
        "body": "Designing event sourcing infrastructure\nChoosing between event store technologies\nImplementing custom event stores\nBuilding projections from event streams\nAdding snapshotting for aggregate performance\nIntegrating CQRS with event sourcing"
      },
      {
        "title": "Event Store Architecture",
        "body": "┌─────────────────────────────────────────────────────┐\n│                    Event Store                       │\n├─────────────────────────────────────────────────────┤\n│  ┌─────────────┐  ┌─────────────┐  ┌─────────────┐ │\n│  │   Stream 1   │  │   Stream 2   │  │   Stream 3   │ │\n│  │ (Aggregate)  │  │ (Aggregate)  │  │ (Aggregate)  │ │\n│  ├─────────────┤  ├─────────────┤  ├─────────────┤ │\n│  │ Event 1     │  │ Event 1     │  │ Event 1     │ │\n│  │ Event 2     │  │ Event 2     │  │ Event 2     │ │\n│  │ Event 3     │  │ ...         │  │ Event 3     │ │\n│  │ ...         │  │             │  │ Event 4     │ │\n│  └─────────────┘  └─────────────┘  └─────────────┘ │\n├─────────────────────────────────────────────────────┤\n│  Global Position: 1 → 2 → 3 → 4 → 5 → 6 → ...     │\n└─────────────────────────────────────────────────────┘"
      },
      {
        "title": "Event Store Requirements",
        "body": "RequirementDescriptionAppend-onlyEvents are immutable, only appendsOrderedPer-stream and global orderingVersionedOptimistic concurrency controlSubscriptionsReal-time event notificationsIdempotentHandle duplicate writes safely"
      },
      {
        "title": "Technology Comparison",
        "body": "TechnologyBest ForLimitationsEventStoreDBPure event sourcingSingle-purposePostgreSQLExisting Postgres stackManual implementationKafkaHigh-throughput streamsNot ideal for per-stream queriesDynamoDBServerless, AWS-nativeQuery limitations"
      },
      {
        "title": "Event Schema Design",
        "body": "Events are the source of truth. Well-designed schemas ensure long-term evolvability."
      },
      {
        "title": "Event Envelope Structure",
        "body": "{\n  \"event_id\": \"uuid\",\n  \"stream_id\": \"Order-abc123\",\n  \"event_type\": \"OrderPlaced\",\n  \"version\": 1,\n  \"schema_version\": 1,\n  \"data\": {\n    \"customer_id\": \"cust-1\",\n    \"total_cents\": 5000\n  },\n  \"metadata\": {\n    \"correlation_id\": \"req-xyz\",\n    \"causation_id\": \"evt-prev\",\n    \"user_id\": \"user-1\",\n    \"timestamp\": \"2025-01-15T10:30:00Z\"\n  },\n  \"global_position\": 42\n}"
      },
      {
        "title": "Schema Evolution Rules",
        "body": "Add fields freely — new optional fields are always safe\nNever remove or rename fields — introduce a new event type instead\nVersion event types — OrderPlacedV2 when the schema changes materially\nUpcast on read — transform old versions to the current shape in the deserializer"
      },
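      {
        "title": "Upcasting Sketch",
        "body": "Upcasting is usually a small registry keyed by (event_type, schema_version). A minimal sketch; the registry name, the OrderPlaced entry, and the assumed v2 currency field are illustrative, not part of this package:\n\n# Hypothetical upcaster registry: (event_type, schema_version) -> next version\nUPCASTERS = {\n    (\"OrderPlaced\", 1): lambda data: {**data, \"currency\": \"USD\"},  # assumed v2 field\n}\n\ndef upcast(event_type: str, schema_version: int, data: dict) -> dict:\n    \"\"\"Apply upcasters until the payload reaches the current schema version.\"\"\"\n    while (event_type, schema_version) in UPCASTERS:\n        data = UPCASTERS[(event_type, schema_version)](data)\n        schema_version += 1\n    return data"
      },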
      {
        "title": "PostgreSQL Event Store Schema",
        "body": "CREATE TABLE events (\n    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),\n    stream_id VARCHAR(255) NOT NULL,\n    stream_type VARCHAR(255) NOT NULL,\n    event_type VARCHAR(255) NOT NULL,\n    event_data JSONB NOT NULL,\n    metadata JSONB DEFAULT '{}',\n    version BIGINT NOT NULL,\n    global_position BIGSERIAL,\n    created_at TIMESTAMPTZ DEFAULT NOW(),\n    CONSTRAINT unique_stream_version UNIQUE (stream_id, version)\n);\n\nCREATE INDEX idx_events_stream ON events(stream_id, version);\nCREATE INDEX idx_events_global ON events(global_position);\nCREATE INDEX idx_events_type ON events(event_type);\n\nCREATE TABLE snapshots (\n    stream_id VARCHAR(255) PRIMARY KEY,\n    stream_type VARCHAR(255) NOT NULL,\n    snapshot_data JSONB NOT NULL,\n    version BIGINT NOT NULL,\n    created_at TIMESTAMPTZ DEFAULT NOW()\n);\n\nCREATE TABLE subscription_checkpoints (\n    subscription_id VARCHAR(255) PRIMARY KEY,\n    last_position BIGINT NOT NULL DEFAULT 0,\n    updated_at TIMESTAMPTZ DEFAULT NOW()\n);"
      },
      {
        "title": "Event Store Implementation",
        "body": "@dataclass\nclass Event:\n    stream_id: str\n    event_type: str\n    data: dict\n    metadata: dict = field(default_factory=dict)\n    event_id: UUID = field(default_factory=uuid4)\n    version: int | None = None\n    global_position: int | None = None\n\nclass EventStore:  # backed by PostgreSQL schema above\n    def __init__(self, pool: asyncpg.Pool):\n        self.pool = pool\n\n    async def append(self, stream_id: str, stream_type: str,\n                     events: list[Event],\n                     expected_version: int | None = None) -> list[Event]:\n        \"\"\"Append events with optimistic concurrency control.\"\"\"\n        async with self.pool.acquire() as conn:\n            async with conn.transaction():\n                if expected_version is not None:\n                    current = await conn.fetchval(\n                        \"SELECT MAX(version) FROM events \"\n                        \"WHERE stream_id = $1\", stream_id\n                    ) or 0\n                    if current != expected_version:\n                        raise ConcurrencyError(\n                            f\"Expected {expected_version}, got {current}\"\n                        )\n\n                start = await conn.fetchval(\n                    \"SELECT COALESCE(MAX(version), 0) + 1 \"\n                    \"FROM events WHERE stream_id = $1\", stream_id\n                )\n                for i, evt in enumerate(events):\n                    evt.version = start + i\n                    row = await conn.fetchrow(\n                        \"INSERT INTO events (id, stream_id, stream_type, \"\n                        \"event_type, event_data, metadata, version) \"\n                        \"VALUES ($1,$2,$3,$4,$5,$6,$7) \"\n                        \"RETURNING global_position\",\n                        evt.event_id, stream_id, stream_type,\n                        evt.event_type, json.dumps(evt.data),\n                        json.dumps(evt.metadata), evt.version,\n                    )\n                    evt.global_position = row[\"global_position\"]\n                return events\n\n    async def read_stream(self, stream_id: str,\n                          from_version: int = 0) -> list[Event]:\n        \"\"\"Read events for a single stream.\"\"\"\n        async with self.pool.acquire() as conn:\n            rows = await conn.fetch(\n                \"SELECT * FROM events WHERE stream_id = $1 \"\n                \"AND version >= $2 ORDER BY version\",\n                stream_id, from_version,\n            )\n            return [self._to_event(r) for r in rows]\n\n    async def read_all(self, from_position: int = 0,\n                       limit: int = 1000) -> list[Event]:\n        \"\"\"Read global event stream for projections / subscriptions.\"\"\"\n        async with self.pool.acquire() as conn:\n            rows = await conn.fetch(\n                \"SELECT * FROM events WHERE global_position > $1 \"\n                \"ORDER BY global_position LIMIT $2\",\n                from_position, limit,\n            )\n            return [self._to_event(r) for r in rows]"
      },
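      {
        "title": "Usage Sketch",
        "body": "A brief usage sketch of the store above, assuming an asyncpg pool is already connected; the stream name and payload are illustrative:\n\nasync def example(pool: asyncpg.Pool):\n    store = EventStore(pool)\n\n    # Append to a brand-new stream: expected_version=0 means \"no events yet\".\n    placed = Event(stream_id=\"Order-abc123\", event_type=\"OrderPlaced\",\n                   data={\"customer_id\": \"cust-1\", \"total_cents\": 5000})\n    await store.append(\"Order-abc123\", \"Order\", [placed], expected_version=0)\n\n    # Rehydrate one stream, then read the global feed for projections.\n    history = await store.read_stream(\"Order-abc123\")\n    feed = await store.read_all(from_position=0, limit=100)"
      },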
      {
        "title": "Projections",
        "body": "Projections build read-optimised views by replaying events. They are the \"Q\" side of CQRS."
      },
      {
        "title": "Projection Lifecycle",
        "body": "Start from checkpoint — resume from last processed global position\nApply events — update the read model for each relevant event type\nSave checkpoint — persist the new position atomically with the read model"
      },
      {
        "title": "Projection Example",
        "body": "class OrderSummaryProjection:\n    def __init__(self, db, event_store: EventStore):\n        self.db = db\n        self.store = event_store\n\n    async def run(self, batch_size: int = 100):\n        position = await self._load_checkpoint()\n        while True:\n            events = await self.store.read_all(position, batch_size)\n            if not events:\n                await asyncio.sleep(1)\n                continue\n            for evt in events:\n                await self._apply(evt)\n                position = evt.global_position\n            await self._save_checkpoint(position)\n\n    async def _apply(self, event: Event):\n        match event.event_type:\n            case \"OrderPlaced\":\n                await self.db.execute(\n                    \"INSERT INTO order_summaries (id, customer, total, status) \"\n                    \"VALUES ($1,$2,$3,'placed')\",\n                    event.data[\"order_id\"], event.data[\"customer_id\"],\n                    event.data[\"total_cents\"],\n                )\n            case \"OrderShipped\":\n                await self.db.execute(\n                    \"UPDATE order_summaries SET status='shipped' \"\n                    \"WHERE id=$1\", event.data[\"order_id\"],\n                )"
      },
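      {
        "title": "Checkpoint Helpers Sketch",
        "body": "The _load_checkpoint / _save_checkpoint helpers used above are not shown in the package. A minimal sketch against the subscription_checkpoints table, assuming self.db is an asyncpg pool and \"order-summary\" is this projection's id; for true atomicity, run _apply and the checkpoint update in one transaction:\n\n    async def _load_checkpoint(self) -> int:\n        position = await self.db.fetchval(\n            \"SELECT last_position FROM subscription_checkpoints \"\n            \"WHERE subscription_id = $1\", \"order-summary\")\n        return position or 0\n\n    async def _save_checkpoint(self, position: int):\n        await self.db.execute(\n            \"INSERT INTO subscription_checkpoints (subscription_id, last_position) \"\n            \"VALUES ($1, $2) \"\n            \"ON CONFLICT (subscription_id) DO UPDATE SET \"\n            \"last_position = EXCLUDED.last_position, updated_at = NOW()\",\n            \"order-summary\", position)"
      },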
      {
        "title": "Projection Design Rules",
        "body": "Idempotent handlers — replaying the same event twice must not corrupt state\nOne projection per read model — keep projections focused\nRebuild from scratch — projections should be deletable and fully replayable\nSeparate storage — projections can live in different databases (Postgres, Elasticsearch, Redis)"
      },
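      {
        "title": "Idempotent Handler Sketch",
        "body": "One way to satisfy the first rule above for the OrderPlaced handler, assuming id is the primary key of order_summaries: let the insert be a no-op on replay.\n\n                await self.db.execute(\n                    \"INSERT INTO order_summaries (id, customer, total, status) \"\n                    \"VALUES ($1,$2,$3,'placed') \"\n                    \"ON CONFLICT (id) DO NOTHING\",  # replaying the event is a no-op\n                    event.data[\"order_id\"], event.data[\"customer_id\"],\n                    event.data[\"total_cents\"],\n                )"
      },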
      {
        "title": "Snapshotting",
        "body": "Snapshots accelerate aggregate rehydration by caching state at a known version.\n\nUse when streams exceed ~100 events, aggregates have expensive rehydration, or on a cadence (e.g., every 50 events)."
      },
      {
        "title": "Snapshot Flow",
        "body": "class SnapshottedRepository:\n    def __init__(self, event_store: EventStore, pool):\n        self.store = event_store\n        self.pool = pool\n\n    async def load(self, stream_id: str) -> Aggregate:\n        # 1. Try loading snapshot\n        snap = await self._load_snapshot(stream_id)\n        from_version = 0\n        aggregate = Aggregate(stream_id)\n\n        if snap:\n            aggregate.restore(snap[\"data\"])\n            from_version = snap[\"version\"] + 1\n\n        # 2. Replay events after snapshot\n        events = await self.store.read_stream(stream_id, from_version)\n        for evt in events:\n            aggregate.apply(evt)\n\n        # 3. Snapshot if too many events replayed\n        if len(events) > 50:\n            await self._save_snapshot(\n                stream_id, aggregate.snapshot(), aggregate.version\n            )\n\n        return aggregate"
      },
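      {
        "title": "Snapshot Helpers Sketch",
        "body": "The _load_snapshot / _save_snapshot helpers used above are left out; a minimal sketch against the snapshots table, with the 'Order' stream type hardcoded for illustration:\n\n    async def _load_snapshot(self, stream_id: str) -> dict | None:\n        async with self.pool.acquire() as conn:\n            row = await conn.fetchrow(\n                \"SELECT snapshot_data, version FROM snapshots \"\n                \"WHERE stream_id = $1\", stream_id)\n            if row is None:\n                return None\n            return {\"data\": json.loads(row[\"snapshot_data\"]),\n                    \"version\": row[\"version\"]}\n\n    async def _save_snapshot(self, stream_id: str, data: dict, version: int):\n        async with self.pool.acquire() as conn:\n            await conn.execute(\n                \"INSERT INTO snapshots (stream_id, stream_type, snapshot_data, version) \"\n                \"VALUES ($1, 'Order', $2, $3) \"\n                \"ON CONFLICT (stream_id) DO UPDATE SET \"\n                \"snapshot_data = EXCLUDED.snapshot_data, \"\n                \"version = EXCLUDED.version, created_at = NOW()\",\n                stream_id, json.dumps(data), version)"
      },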
      {
        "title": "CQRS Integration",
        "body": "CQRS separates the write model (commands → events) from the read model (projections).\n\nCommands ──► Aggregate ──► Event Store ──► Projections ──► Query API\n (write)     (domain)      (append)        (build)        (read)"
      },
      {
        "title": "Key Principles",
        "body": "Write side validates commands, emits events, enforces invariants\nRead side subscribes to events, builds optimised query models\nEventual consistency — reads may lag behind writes by milliseconds to seconds\nIndependent scaling — scale reads and writes separately"
      },
      {
        "title": "Command Handler Pattern",
        "body": "class PlaceOrderHandler:\n    def __init__(self, event_store: EventStore):\n        self.store = event_store\n\n    async def handle(self, cmd: PlaceOrderCommand):\n        # Load aggregate from events\n        events = await self.store.read_stream(f\"Order-{cmd.order_id}\")\n        order = Order.reconstitute(events)\n\n        # Execute command — validates and produces new events\n        new_events = order.place(cmd.customer_id, cmd.items)\n\n        # Persist with concurrency check\n        await self.store.append(\n            f\"Order-{cmd.order_id}\", \"Order\", new_events,\n            expected_version=order.version,\n        )"
      },
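      {
        "title": "Aggregate Sketch",
        "body": "The handler above assumes Order.reconstitute and order.place; a minimal, illustrative aggregate (not the package's implementation) that replays events and enforces one invariant:\n\nclass Order:\n    def __init__(self, stream_id: str):\n        self.id = stream_id\n        self.version = 0\n        self.placed = False\n\n    @classmethod\n    def reconstitute(cls, events: list[Event]) -> \"Order\":\n        order = cls(events[0].stream_id if events else \"\")\n        for evt in events:\n            order._apply(evt)\n        return order\n\n    def _apply(self, evt: Event):\n        if evt.event_type == \"OrderPlaced\":\n            self.placed = True\n        self.version = evt.version  # tracked for the optimistic concurrency check\n\n    def place(self, customer_id: str, items: list) -> list[Event]:\n        if self.placed:\n            raise ValueError(\"Order already placed\")  # write-side invariant\n        return [Event(stream_id=self.id, event_type=\"OrderPlaced\",\n                      data={\"customer_id\": customer_id, \"items\": items})]"
      },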
      {
        "title": "EventStoreDB Integration",
        "body": "from esdbclient import EventStoreDBClient, NewEvent, StreamState\nimport json\n\nclient = EventStoreDBClient(uri=\"esdb://localhost:2113?tls=false\")\n\ndef append_events(stream_name: str, events: list, expected_revision=None):\n    new_events = [\n        NewEvent(\n            type=event['type'],\n            data=json.dumps(event['data']).encode(),\n            metadata=json.dumps(event.get('metadata', {})).encode()\n        )\n        for event in events\n    ]\n    state = (StreamState.ANY if expected_revision is None\n             else StreamState.NO_STREAM if expected_revision == -1\n             else expected_revision)\n    return client.append_to_stream(stream_name, new_events, current_version=state)\n\ndef read_stream(stream_name: str, from_revision: int = 0):\n    return [\n        {'type': e.type, 'data': json.loads(e.data),\n         'stream_position': e.stream_position}\n        for e in client.get_stream(stream_name, stream_position=from_revision)\n    ]\n\n# Category projection: read all events for Order-* streams\ndef read_category(category: str):\n    return read_stream(f\"$ce-{category}\")"
      },
      {
        "title": "DynamoDB Event Store",
        "body": "import boto3\nfrom boto3.dynamodb.conditions import Key\nfrom datetime import datetime\nimport json, uuid\n\nclass DynamoEventStore:\n    def __init__(self, table_name: str):\n        self.table = boto3.resource('dynamodb').Table(table_name)\n\n    def append(self, stream_id: str, events: list, expected_version: int = 0):\n        with self.table.batch_writer() as batch:\n            for i, event in enumerate(events):\n                version = expected_version + i + 1\n                batch.put_item(Item={\n                    'PK': f\"STREAM#{stream_id}\",\n                    'SK': f\"VERSION#{version:020d}\",\n                    'GSI1PK': 'EVENTS',\n                    'GSI1SK': datetime.utcnow().isoformat(),\n                    'event_id': str(uuid.uuid4()),\n                    'event_type': event['type'],\n                    'event_data': json.dumps(event['data']),\n                    'version': version,\n                })\n\n    def read_stream(self, stream_id: str, from_version: int = 0):\n        resp = self.table.query(\n            KeyConditionExpression=\n                Key('PK').eq(f\"STREAM#{stream_id}\") &\n                Key('SK').gte(f\"VERSION#{from_version:020d}\")\n        )\n        return [\n            {'event_type': item['event_type'],\n             'data': json.loads(item['event_data']),\n             'version': item['version']}\n            for item in resp['Items']\n        ]\n\nDynamoDB table design: PK=STREAM#{id}, SK=VERSION#{version}, GSI1 for global ordering."
      },
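      {
        "title": "Global Feed Sketch (DynamoDB)",
        "body": "Reading the global feed through GSI1, assuming the index is named 'GSI1' and projects all attributes; a single 'EVENTS' partition with timestamp sort keys gives only approximate ordering and caps write throughput, so treat this as a sketch:\n\n    def read_all(self, limit: int = 1000):\n        resp = self.table.query(\n            IndexName='GSI1',\n            KeyConditionExpression=Key('GSI1PK').eq('EVENTS'),\n            Limit=limit,\n        )\n        return [\n            {'event_type': item['event_type'],\n             'data': json.loads(item['event_data']),\n             'version': item['version']}\n            for item in resp['Items']\n        ]"
      },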
      {
        "title": "Do",
        "body": "Name streams {Type}-{id} — e.g., Order-abc123\nInclude correlation / causation IDs in metadata for tracing\nVersion event schemas from day one — plan for evolution\nImplement idempotent writes — use event IDs for deduplication\nIndex for your query patterns — stream, global position, event type"
      },
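      {
        "title": "Deterministic Event IDs Sketch",
        "body": "One way to get the idempotent writes called for above: derive event IDs deterministically (e.g., UUIDv5 from the command id), so a retried command yields the same id and the events table's primary key rejects the duplicate. The namespace URL is illustrative:\n\nimport uuid\n\n# Hypothetical namespace for this application's event IDs.\nEVENT_NS = uuid.uuid5(uuid.NAMESPACE_URL, \"https://example.com/events\")\n\ndef deterministic_event_id(command_id: str, event_type: str, index: int) -> uuid.UUID:\n    \"\"\"Same command retried => same event id => the duplicate insert fails\n    instead of creating a second copy of the event.\"\"\"\n    return uuid.uuid5(EVENT_NS, f\"{command_id}:{event_type}:{index}\")"
      },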
      {
        "title": "Don't",
        "body": "Mutate or delete events — they are immutable facts\nStore large payloads — keep events small; reference blobs externally\nSkip optimistic concurrency — prevents data corruption\nIgnore backpressure — handle slow consumers gracefully\nCouple projections to the write model — projections should be independently deployable"
      },
      {
        "title": "NEVER Do",
        "body": "NEVER update or delete events — Events are immutable historical facts; create compensating events instead\nNEVER skip version checks on append — Optimistic concurrency prevents lost updates and corruption\nNEVER embed large blobs in events — Store blobs externally, reference by ID in the event\nNEVER use random UUIDs for event IDs without idempotency checks — Retries create duplicates\nNEVER read projections for command validation — Use the event stream as the source of truth\nNEVER couple projections to the write transaction — Projections must be rebuildable independently"
      }
    ]
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/wpank/backend-event-stores",
    "publisherUrl": "https://clawhub.ai/wpank/backend-event-stores",
    "owner": "wpank",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/backend-event-stores",
    "downloadUrl": "https://openagent3.xyz/downloads/backend-event-stores",
    "agentUrl": "https://openagent3.xyz/skills/backend-event-stores/agent",
    "manifestUrl": "https://openagent3.xyz/skills/backend-event-stores/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/backend-event-stores/agent.md"
  }
}