{
  "schemaVersion": "1.0",
  "item": {
    "slug": "causal-inference",
    "name": "Causal Inference",
    "source": "tencent",
    "type": "skill",
    "category": "效率提升",
    "sourceUrl": "https://clawhub.ai/oswalpalash/causal-inference",
    "canonicalUrl": "https://clawhub.ai/oswalpalash/causal-inference",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/causal-inference",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=causal-inference",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "references/do-calculus.md",
      "references/estimation.md",
      "scripts/backfill_calendar.py",
      "scripts/backfill_email.py",
      "scripts/backfill_messages.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=causal-inference",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=causal-inference",
        "contentDisposition": "attachment; filename=\"causal-inference-0.2.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/causal-inference"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/causal-inference",
    "agentPageUrl": "https://openagent3.xyz/skills/causal-inference/agent",
    "manifestUrl": "https://openagent3.xyz/skills/causal-inference/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/causal-inference/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Causal Inference",
        "body": "A lightweight causal layer for predicting action outcomes, not by pattern-matching correlations, but by modeling interventions and counterfactuals."
      },
      {
        "title": "Core Invariant",
        "body": "Every action must be representable as an explicit intervention on a causal model, with predicted effects + uncertainty + a falsifiable audit trail.\n\nPlans must be causally valid, not just plausible."
      },
      {
        "title": "When to Trigger",
        "body": "Trigger this skill on ANY high-level action, including but not limited to:\n\nDomainActions to LogCommunicationSend email, send message, reply, follow-up, notification, mentionCalendarCreate/move/cancel meeting, set reminder, RSVPTasksCreate/complete/defer task, set priority, assignFilesCreate/edit/share document, commit code, deploySocialPost, react, comment, share, DMPurchasesOrder, subscribe, cancel, refundSystemConfig change, permission grant, integration setup\n\nAlso trigger when:\n\nReviewing outcomes — \"Did that email get a reply?\" → log outcome, update estimates\nDebugging failures — \"Why didn't this work?\" → trace causal graph\nBackfilling history — \"Analyze my past emails/calendar\" → parse logs, reconstruct actions\nPlanning — \"Should I send now or later?\" → query causal model"
      },
      {
        "title": "Backfill: Bootstrap from Historical Data",
        "body": "Don't start from zero. Parse existing logs to reconstruct past actions + outcomes."
      },
      {
        "title": "Email Backfill",
        "body": "# Extract sent emails with reply status\ngog gmail list --sent --after 2024-01-01 --format json > /tmp/sent_emails.json\n\n# For each sent email, check if reply exists\npython3 scripts/backfill_email.py /tmp/sent_emails.json"
      },
      {
        "title": "Calendar Backfill",
        "body": "# Extract past events with attendance\ngog calendar list --after 2024-01-01 --format json > /tmp/events.json\n\n# Reconstruct: did meeting happen? was it moved? attendee count?\npython3 scripts/backfill_calendar.py /tmp/events.json"
      },
      {
        "title": "Message Backfill (WhatsApp/Discord/Slack)",
        "body": "# Parse message history for send/reply patterns\nwacli search --after 2024-01-01 --from me --format json > /tmp/wa_sent.json\npython3 scripts/backfill_messages.py /tmp/wa_sent.json"
      },
      {
        "title": "Generic Backfill Pattern",
        "body": "# For any historical data source:\nfor record in historical_data:\n    action_event = {\n        \"action\": infer_action_type(record),\n        \"context\": extract_context(record),\n        \"time\": record[\"timestamp\"],\n        \"pre_state\": reconstruct_pre_state(record),\n        \"post_state\": extract_post_state(record),\n        \"outcome\": determine_outcome(record),\n        \"backfilled\": True  # Mark as reconstructed\n    }\n    append_to_log(action_event)"
      },
      {
        "title": "A. Action Log (required)",
        "body": "Every executed action emits a structured event:\n\n{\n  \"action\": \"send_followup\",\n  \"domain\": \"email\",\n  \"context\": {\"recipient_type\": \"warm_lead\", \"prior_touches\": 2},\n  \"time\": \"2025-01-26T10:00:00Z\",\n  \"pre_state\": {\"days_since_last_contact\": 7},\n  \"post_state\": {\"reply_received\": true, \"reply_delay_hours\": 4},\n  \"outcome\": \"positive_reply\",\n  \"outcome_observed_at\": \"2025-01-26T14:00:00Z\",\n  \"backfilled\": false\n}\n\nStore in memory/causal/action_log.jsonl."
      },
      {
        "title": "B. Causal Graphs (per domain)",
        "body": "Start with 10-30 observable variables per domain.\n\nEmail domain:\n\nsend_time → reply_prob\nsubject_style → open_rate\nrecipient_type → reply_prob\nfollowup_count → reply_prob (diminishing)\ntime_since_last → reply_prob\n\nCalendar domain:\n\nmeeting_time → attendance_rate\nattendee_count → slip_risk\nconflict_degree → reschedule_prob\nbuffer_time → focus_quality\n\nMessaging domain:\n\nresponse_delay → conversation_continuation\nmessage_length → response_length\ntime_of_day → response_prob\nplatform → response_delay\n\nTask domain:\n\ndue_date_proximity → completion_prob\npriority_level → completion_speed\ntask_size → deferral_risk\ncontext_switches → error_rate\n\nStore graph definitions in memory/causal/graphs/."
      },
      {
        "title": "C. Estimation",
        "body": "For each \"knob\" (intervention variable), estimate treatment effects:\n\n# Pseudo: effect of morning vs evening sends\neffect = mean(reply_prob | send_time=morning) - mean(reply_prob | send_time=evening)\nuncertainty = std_error(effect)\n\nUse simple regression or propensity matching first. Graduate to do-calculus when graphs are explicit and identification is needed."
      },
      {
        "title": "D. Decision Policy",
        "body": "Before executing actions:\n\nIdentify intervention variable(s)\nQuery causal model for expected outcome distribution\nCompute expected utility + uncertainty bounds\nIf uncertainty > threshold OR expected harm > threshold → refuse or escalate to user\nLog prediction for later validation"
      },
      {
        "title": "On Every Action",
        "body": "BEFORE executing:\n1. Log pre_state\n2. If enough historical data: query model for expected outcome\n3. If high uncertainty or risk: confirm with user\n\nAFTER executing:\n1. Log action + context + time\n2. Set reminder to check outcome (if not immediate)\n\nWHEN outcome observed:\n1. Update action log with post_state + outcome\n2. Re-estimate treatment effects if enough new data"
      },
      {
        "title": "Planning an Action",
        "body": "1. User request → identify candidate actions\n2. For each action:\n   a. Map to intervention(s) on causal graph\n   b. Predict P(outcome | do(action))\n   c. Estimate uncertainty\n   d. Compute expected utility\n3. Rank by expected utility, filter by safety\n4. Execute best action, log prediction\n5. Observe outcome, update model"
      },
      {
        "title": "Debugging a Failure",
        "body": "1. Identify failed outcome\n2. Trace back through causal graph\n3. For each upstream node:\n   a. Was the value as expected?\n   b. Did the causal link hold?\n4. Identify broken link(s)\n5. Compute minimal intervention set that would have prevented failure\n6. Log counterfactual for learning"
      },
      {
        "title": "Quick Start: Bootstrap Today",
        "body": "# 1. Create the infrastructure\nmkdir -p memory/causal/graphs memory/causal/estimates\n\n# 2. Initialize config\ncat > memory/causal/config.yaml << 'EOF'\ndomains:\n  - email\n  - calendar\n  - messaging\n  - tasks\n\nthresholds:\n  max_uncertainty: 0.3\n  min_expected_utility: 0.1\n\nprotected_actions:\n  - delete_email\n  - cancel_meeting\n  - send_to_new_contact\n  - financial_transaction\nEOF\n\n# 3. Backfill one domain (start with email)\npython3 scripts/backfill_email.py\n\n# 4. Estimate initial effects\npython3 scripts/estimate_effect.py --treatment send_time --outcome reply_received --values morning,evening"
      },
      {
        "title": "Safety Constraints",
        "body": "Define \"protected variables\" that require explicit user approval:\n\nprotected:\n  - delete_email\n  - cancel_meeting\n  - send_to_new_contact\n  - financial_transaction\n\nthresholds:\n  max_uncertainty: 0.3  # don't act if P(outcome) uncertainty > 30%\n  min_expected_utility: 0.1  # don't act if expected gain < 10%"
      },
      {
        "title": "Files",
        "body": "memory/causal/action_log.jsonl — all logged actions with outcomes\nmemory/causal/graphs/ — domain-specific causal graph definitions\nmemory/causal/estimates/ — learned treatment effects\nmemory/causal/config.yaml — safety thresholds and protected variables"
      },
      {
        "title": "References",
        "body": "See references/do-calculus.md for formal intervention semantics\nSee references/estimation.md for treatment effect estimation methods"
      }
    ],
    "body": "Causal Inference\n\nA lightweight causal layer for predicting action outcomes, not by pattern-matching correlations, but by modeling interventions and counterfactuals.\n\nCore Invariant\n\nEvery action must be representable as an explicit intervention on a causal model, with predicted effects + uncertainty + a falsifiable audit trail.\n\nPlans must be causally valid, not just plausible.\n\nWhen to Trigger\n\nTrigger this skill on ANY high-level action, including but not limited to:\n\nDomain\tActions to Log\nCommunication\tSend email, send message, reply, follow-up, notification, mention\nCalendar\tCreate/move/cancel meeting, set reminder, RSVP\nTasks\tCreate/complete/defer task, set priority, assign\nFiles\tCreate/edit/share document, commit code, deploy\nSocial\tPost, react, comment, share, DM\nPurchases\tOrder, subscribe, cancel, refund\nSystem\tConfig change, permission grant, integration setup\n\nAlso trigger when:\n\nReviewing outcomes — \"Did that email get a reply?\" → log outcome, update estimates\nDebugging failures — \"Why didn't this work?\" → trace causal graph\nBackfilling history — \"Analyze my past emails/calendar\" → parse logs, reconstruct actions\nPlanning — \"Should I send now or later?\" → query causal model\nBackfill: Bootstrap from Historical Data\n\nDon't start from zero. Parse existing logs to reconstruct past actions + outcomes.\n\nEmail Backfill\n# Extract sent emails with reply status\ngog gmail list --sent --after 2024-01-01 --format json > /tmp/sent_emails.json\n\n# For each sent email, check if reply exists\npython3 scripts/backfill_email.py /tmp/sent_emails.json\n\nCalendar Backfill\n# Extract past events with attendance\ngog calendar list --after 2024-01-01 --format json > /tmp/events.json\n\n# Reconstruct: did meeting happen? was it moved? attendee count?\npython3 scripts/backfill_calendar.py /tmp/events.json\n\nMessage Backfill (WhatsApp/Discord/Slack)\n# Parse message history for send/reply patterns\nwacli search --after 2024-01-01 --from me --format json > /tmp/wa_sent.json\npython3 scripts/backfill_messages.py /tmp/wa_sent.json\n\nGeneric Backfill Pattern\n# For any historical data source:\nfor record in historical_data:\n    action_event = {\n        \"action\": infer_action_type(record),\n        \"context\": extract_context(record),\n        \"time\": record[\"timestamp\"],\n        \"pre_state\": reconstruct_pre_state(record),\n        \"post_state\": extract_post_state(record),\n        \"outcome\": determine_outcome(record),\n        \"backfilled\": True  # Mark as reconstructed\n    }\n    append_to_log(action_event)\n\nArchitecture\nA. Action Log (required)\n\nEvery executed action emits a structured event:\n\n{\n  \"action\": \"send_followup\",\n  \"domain\": \"email\",\n  \"context\": {\"recipient_type\": \"warm_lead\", \"prior_touches\": 2},\n  \"time\": \"2025-01-26T10:00:00Z\",\n  \"pre_state\": {\"days_since_last_contact\": 7},\n  \"post_state\": {\"reply_received\": true, \"reply_delay_hours\": 4},\n  \"outcome\": \"positive_reply\",\n  \"outcome_observed_at\": \"2025-01-26T14:00:00Z\",\n  \"backfilled\": false\n}\n\n\nStore in memory/causal/action_log.jsonl.\n\nB. Causal Graphs (per domain)\n\nStart with 10-30 observable variables per domain.\n\nEmail domain:\n\nsend_time → reply_prob\nsubject_style → open_rate\nrecipient_type → reply_prob\nfollowup_count → reply_prob (diminishing)\ntime_since_last → reply_prob\n\n\nCalendar domain:\n\nmeeting_time → attendance_rate\nattendee_count → slip_risk\nconflict_degree → reschedule_prob\nbuffer_time → focus_quality\n\n\nMessaging domain:\n\nresponse_delay → conversation_continuation\nmessage_length → response_length\ntime_of_day → response_prob\nplatform → response_delay\n\n\nTask domain:\n\ndue_date_proximity → completion_prob\npriority_level → completion_speed\ntask_size → deferral_risk\ncontext_switches → error_rate\n\n\nStore graph definitions in memory/causal/graphs/.\n\nC. Estimation\n\nFor each \"knob\" (intervention variable), estimate treatment effects:\n\n# Pseudo: effect of morning vs evening sends\neffect = mean(reply_prob | send_time=morning) - mean(reply_prob | send_time=evening)\nuncertainty = std_error(effect)\n\n\nUse simple regression or propensity matching first. Graduate to do-calculus when graphs are explicit and identification is needed.\n\nD. Decision Policy\n\nBefore executing actions:\n\nIdentify intervention variable(s)\nQuery causal model for expected outcome distribution\nCompute expected utility + uncertainty bounds\nIf uncertainty > threshold OR expected harm > threshold → refuse or escalate to user\nLog prediction for later validation\nWorkflow\nOn Every Action\nBEFORE executing:\n1. Log pre_state\n2. If enough historical data: query model for expected outcome\n3. If high uncertainty or risk: confirm with user\n\nAFTER executing:\n1. Log action + context + time\n2. Set reminder to check outcome (if not immediate)\n\nWHEN outcome observed:\n1. Update action log with post_state + outcome\n2. Re-estimate treatment effects if enough new data\n\nPlanning an Action\n1. User request → identify candidate actions\n2. For each action:\n   a. Map to intervention(s) on causal graph\n   b. Predict P(outcome | do(action))\n   c. Estimate uncertainty\n   d. Compute expected utility\n3. Rank by expected utility, filter by safety\n4. Execute best action, log prediction\n5. Observe outcome, update model\n\nDebugging a Failure\n1. Identify failed outcome\n2. Trace back through causal graph\n3. For each upstream node:\n   a. Was the value as expected?\n   b. Did the causal link hold?\n4. Identify broken link(s)\n5. Compute minimal intervention set that would have prevented failure\n6. Log counterfactual for learning\n\nQuick Start: Bootstrap Today\n# 1. Create the infrastructure\nmkdir -p memory/causal/graphs memory/causal/estimates\n\n# 2. Initialize config\ncat > memory/causal/config.yaml << 'EOF'\ndomains:\n  - email\n  - calendar\n  - messaging\n  - tasks\n\nthresholds:\n  max_uncertainty: 0.3\n  min_expected_utility: 0.1\n\nprotected_actions:\n  - delete_email\n  - cancel_meeting\n  - send_to_new_contact\n  - financial_transaction\nEOF\n\n# 3. Backfill one domain (start with email)\npython3 scripts/backfill_email.py\n\n# 4. Estimate initial effects\npython3 scripts/estimate_effect.py --treatment send_time --outcome reply_received --values morning,evening\n\nSafety Constraints\n\nDefine \"protected variables\" that require explicit user approval:\n\nprotected:\n  - delete_email\n  - cancel_meeting\n  - send_to_new_contact\n  - financial_transaction\n\nthresholds:\n  max_uncertainty: 0.3  # don't act if P(outcome) uncertainty > 30%\n  min_expected_utility: 0.1  # don't act if expected gain < 10%\n\nFiles\nmemory/causal/action_log.jsonl — all logged actions with outcomes\nmemory/causal/graphs/ — domain-specific causal graph definitions\nmemory/causal/estimates/ — learned treatment effects\nmemory/causal/config.yaml — safety thresholds and protected variables\nReferences\nSee references/do-calculus.md for formal intervention semantics\nSee references/estimation.md for treatment effect estimation methods"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/oswalpalash/causal-inference",
    "publisherUrl": "https://clawhub.ai/oswalpalash/causal-inference",
    "owner": "oswalpalash",
    "version": "0.2.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/causal-inference",
    "downloadUrl": "https://openagent3.xyz/downloads/causal-inference",
    "agentUrl": "https://openagent3.xyz/skills/causal-inference/agent",
    "manifestUrl": "https://openagent3.xyz/skills/causal-inference/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/causal-inference/agent.md"
  }
}