{
  "schemaVersion": "1.0",
  "item": {
    "slug": "data-analysis",
    "name": "Data Analysis",
    "source": "tencent",
    "type": "skill",
    "category": "数据分析",
    "sourceUrl": "https://clawhub.ai/ivangdavila/data-analysis",
    "canonicalUrl": "https://clawhub.ai/ivangdavila/data-analysis",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/data-analysis",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=data-analysis",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "chart-selection.md",
      "decision-briefs.md",
      "metric-contracts.md",
      "pitfalls.md",
      "techniques.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=data-analysis",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=data-analysis",
        "contentDisposition": "attachment; filename=\"data-analysis-1.0.2.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/data-analysis"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/data-analysis",
    "agentPageUrl": "https://openagent3.xyz/skills/data-analysis/agent",
    "manifestUrl": "https://openagent3.xyz/skills/data-analysis/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/data-analysis/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "When to Use",
        "body": "Use this skill when the user needs to analyze, explain, or visualize data from SQL, spreadsheets, notebooks, dashboards, exports, or ad hoc tables.\n\nUse it for KPI debugging, experiment readouts, funnel or cohort analysis, anomaly reviews, executive reporting, and quality checks on metrics or query logic.\n\nPrefer this skill over generic coding or spreadsheet help when the hard part is analytical judgment: metric definition, comparison design, interpretation, or recommendation.\n\nUser asks about: analyzing data, finding patterns, understanding metrics, testing hypotheses, cohort analysis, A/B testing, churn analysis, or statistical significance."
      },
      {
        "title": "Core Principle",
        "body": "Analysis without a decision is just arithmetic. Always clarify: What would change if this analysis shows X vs Y?"
      },
      {
        "title": "Methodology First",
        "body": "Before touching data:\n\nWhat decision is this analysis supporting?\nWhat would change your mind? (the real question)\nWhat data do you actually have vs what you wish you had?\nWhat timeframe is relevant?"
      },
      {
        "title": "Statistical Rigor Checklist",
        "body": "Sample size sufficient? (small N = wide confidence intervals)\n Comparison groups fair? (same time period, similar conditions)\n Multiple comparisons? (20 tests = 1 \"significant\" by chance)\n Effect size meaningful? (statistically significant != practically important)\n Uncertainty quantified? (\"12-18% lift\" not just \"15% lift\")"
      },
      {
        "title": "Architecture",
        "body": "This skill does not require local folders, persistent memory, or setup state.\n\nUse the included reference files as lightweight guides:\n\nmetric-contracts.md for KPI definitions and caveats\nchart-selection.md for visual choice and chart anti-patterns\ndecision-briefs.md for stakeholder-facing outputs\npitfalls.md and techniques.md for analytical rigor and method choice"
      },
      {
        "title": "Quick Reference",
        "body": "Load only the smallest relevant file to keep context focused.\n\nTopicFileMetric definition contractsmetric-contracts.mdVisual selection and chart anti-patternschart-selection.mdDecision-ready output formatsdecision-briefs.mdFailure modes to catch earlypitfalls.mdMethod selection by question typetechniques.md"
      },
      {
        "title": "1. Start from the decision, not the dataset",
        "body": "Identify the decision owner, the question that could change a decision, and the deadline before doing analysis.\nIf no decision would change, reframe the request before computing anything."
      },
      {
        "title": "2. Lock the metric contract before calculating",
        "body": "Define entity, grain, numerator, denominator, time window, timezone, filters, exclusions, and source of truth.\nIf any of those are ambiguous, state the ambiguity explicitly before presenting results."
      },
      {
        "title": "3. Separate extraction, transformation, and interpretation",
        "body": "Keep query logic, cleanup assumptions, and analytical conclusions distinguishable.\nNever hide business assumptions inside SQL, formulas, or notebook code without naming them in the write-up."
      },
      {
        "title": "4. Choose visuals to answer a question",
        "body": "Select charts based on the analytical question: trend, comparison, distribution, relationship, composition, funnel, or cohort retention.\nDo not add charts that make the deck look fuller but do not change the decision."
      },
      {
        "title": "5. Brief every result in decision format",
        "body": "Every output should include the answer, evidence, confidence, caveats, and recommended next action.\nIf the output is going to a stakeholder, translate the method into business implications instead of leading with technical detail."
      },
      {
        "title": "6. Stress-test claims before recommending action",
        "body": "Segment by obvious confounders, compare the right baseline, quantify uncertainty, and check sensitivity to exclusions or time windows.\nStrong-looking numbers without robustness checks are not decision-ready."
      },
      {
        "title": "7. Escalate when the data cannot support the claim",
        "body": "Block or downgrade conclusions when sample size is weak, the source is unreliable, definitions drifted, or confounding is unresolved.\nIt is better to say \"unknown yet\" than to produce false confidence."
      },
      {
        "title": "Common Traps",
        "body": "Reusing a KPI name after changing numerator, denominator, or exclusions -> trend comparisons become invalid.\nComparing daily, weekly, and monthly grains in one chart -> movement looks real but is mostly aggregation noise.\nShowing percentages without underlying counts -> leadership overreacts to tiny denominators.\nUsing a pretty chart instead of the right chart -> the output looks polished but hides the actual decision signal.\nHunting for interesting cuts after seeing the result -> narrative follows chance instead of evidence.\nShipping automated reports without metric owners or caveats -> bad numbers spread faster than they can be corrected.\nTreating observational patterns as causal proof -> action plans get built on correlation alone."
      },
      {
        "title": "Approach Selection",
        "body": "Question typeApproachKey output\"Is X different from Y?\"Hypothesis testp-value + effect size + CI\"What predicts Z?\"Regression/correlationCoefficients + R² + residual check\"How do users behave over time?\"Cohort analysisRetention curves by cohort\"Are these groups different?\"SegmentationProfiles + statistical comparison\"What's unusual?\"Anomaly detectionFlagged points + context\n\nFor technique details and when to use each, see techniques.md."
      },
      {
        "title": "Output Standards",
        "body": "Lead with the insight, not the methodology\nQuantify uncertainty - ranges, not point estimates\nState limitations - what this analysis can't tell you\nRecommend next steps - what would strengthen the conclusion"
      },
      {
        "title": "Red Flags to Escalate",
        "body": "User wants to \"prove\" a predetermined conclusion\nSample size too small for reliable inference\nData quality issues that invalidate analysis\nConfounders that can't be controlled for"
      },
      {
        "title": "External Endpoints",
        "body": "This skill makes no external network requests.\n\nEndpointData SentPurposeNoneNoneN/A\n\nNo data is sent externally."
      },
      {
        "title": "Security & Privacy",
        "body": "Data that leaves your machine:\n\nNothing by default.\n\nData that stays local:\n\nNothing by default.\n\nThis skill does NOT:\n\nAccess undeclared external endpoints.\nStore credentials or raw exports in hidden local memory files.\nCreate or depend on local folder systems for persistence.\nCreate automations or background jobs without explicit user confirmation.\nRewrite its own instruction source files."
      },
      {
        "title": "Related Skills",
        "body": "Install with clawhub install <slug> if user confirms:\n\nsql - query design and review for reliable data extraction.\ncsv - cleanup and normalization for tabular inputs before analysis.\ndashboard - implementation patterns for KPI visualization layers.\nreport - structured stakeholder-facing deliverables after analysis.\nbusiness-intelligence - KPI systems and operating cadence beyond one-off analysis."
      },
      {
        "title": "Feedback",
        "body": "If useful: clawhub star data-analysis\nStay updated: clawhub sync"
      }
    ],
    "body": "When to Use\n\nUse this skill when the user needs to analyze, explain, or visualize data from SQL, spreadsheets, notebooks, dashboards, exports, or ad hoc tables.\n\nUse it for KPI debugging, experiment readouts, funnel or cohort analysis, anomaly reviews, executive reporting, and quality checks on metrics or query logic.\n\nPrefer this skill over generic coding or spreadsheet help when the hard part is analytical judgment: metric definition, comparison design, interpretation, or recommendation.\n\nUser asks about: analyzing data, finding patterns, understanding metrics, testing hypotheses, cohort analysis, A/B testing, churn analysis, or statistical significance.\n\nCore Principle\n\nAnalysis without a decision is just arithmetic. Always clarify: What would change if this analysis shows X vs Y?\n\nMethodology First\n\nBefore touching data:\n\nWhat decision is this analysis supporting?\nWhat would change your mind? (the real question)\nWhat data do you actually have vs what you wish you had?\nWhat timeframe is relevant?\nStatistical Rigor Checklist\n Sample size sufficient? (small N = wide confidence intervals)\n Comparison groups fair? (same time period, similar conditions)\n Multiple comparisons? (20 tests = 1 \"significant\" by chance)\n Effect size meaningful? (statistically significant != practically important)\n Uncertainty quantified? (\"12-18% lift\" not just \"15% lift\")\nArchitecture\n\nThis skill does not require local folders, persistent memory, or setup state.\n\nUse the included reference files as lightweight guides:\n\nmetric-contracts.md for KPI definitions and caveats\nchart-selection.md for visual choice and chart anti-patterns\ndecision-briefs.md for stakeholder-facing outputs\npitfalls.md and techniques.md for analytical rigor and method choice\nQuick Reference\n\nLoad only the smallest relevant file to keep context focused.\n\nTopic\tFile\nMetric definition contracts\tmetric-contracts.md\nVisual selection and chart anti-patterns\tchart-selection.md\nDecision-ready output formats\tdecision-briefs.md\nFailure modes to catch early\tpitfalls.md\nMethod selection by question type\ttechniques.md\nCore Rules\n1. Start from the decision, not the dataset\nIdentify the decision owner, the question that could change a decision, and the deadline before doing analysis.\nIf no decision would change, reframe the request before computing anything.\n2. Lock the metric contract before calculating\nDefine entity, grain, numerator, denominator, time window, timezone, filters, exclusions, and source of truth.\nIf any of those are ambiguous, state the ambiguity explicitly before presenting results.\n3. Separate extraction, transformation, and interpretation\nKeep query logic, cleanup assumptions, and analytical conclusions distinguishable.\nNever hide business assumptions inside SQL, formulas, or notebook code without naming them in the write-up.\n4. Choose visuals to answer a question\nSelect charts based on the analytical question: trend, comparison, distribution, relationship, composition, funnel, or cohort retention.\nDo not add charts that make the deck look fuller but do not change the decision.\n5. Brief every result in decision format\nEvery output should include the answer, evidence, confidence, caveats, and recommended next action.\nIf the output is going to a stakeholder, translate the method into business implications instead of leading with technical detail.\n6. Stress-test claims before recommending action\nSegment by obvious confounders, compare the right baseline, quantify uncertainty, and check sensitivity to exclusions or time windows.\nStrong-looking numbers without robustness checks are not decision-ready.\n7. Escalate when the data cannot support the claim\nBlock or downgrade conclusions when sample size is weak, the source is unreliable, definitions drifted, or confounding is unresolved.\nIt is better to say \"unknown yet\" than to produce false confidence.\nCommon Traps\nReusing a KPI name after changing numerator, denominator, or exclusions -> trend comparisons become invalid.\nComparing daily, weekly, and monthly grains in one chart -> movement looks real but is mostly aggregation noise.\nShowing percentages without underlying counts -> leadership overreacts to tiny denominators.\nUsing a pretty chart instead of the right chart -> the output looks polished but hides the actual decision signal.\nHunting for interesting cuts after seeing the result -> narrative follows chance instead of evidence.\nShipping automated reports without metric owners or caveats -> bad numbers spread faster than they can be corrected.\nTreating observational patterns as causal proof -> action plans get built on correlation alone.\nApproach Selection\nQuestion type\tApproach\tKey output\n\"Is X different from Y?\"\tHypothesis test\tp-value + effect size + CI\n\"What predicts Z?\"\tRegression/correlation\tCoefficients + R² + residual check\n\"How do users behave over time?\"\tCohort analysis\tRetention curves by cohort\n\"Are these groups different?\"\tSegmentation\tProfiles + statistical comparison\n\"What's unusual?\"\tAnomaly detection\tFlagged points + context\n\nFor technique details and when to use each, see techniques.md.\n\nOutput Standards\nLead with the insight, not the methodology\nQuantify uncertainty - ranges, not point estimates\nState limitations - what this analysis can't tell you\nRecommend next steps - what would strengthen the conclusion\nRed Flags to Escalate\nUser wants to \"prove\" a predetermined conclusion\nSample size too small for reliable inference\nData quality issues that invalidate analysis\nConfounders that can't be controlled for\nExternal Endpoints\n\nThis skill makes no external network requests.\n\nEndpoint\tData Sent\tPurpose\nNone\tNone\tN/A\n\nNo data is sent externally.\n\nSecurity & Privacy\n\nData that leaves your machine:\n\nNothing by default.\n\nData that stays local:\n\nNothing by default.\n\nThis skill does NOT:\n\nAccess undeclared external endpoints.\nStore credentials or raw exports in hidden local memory files.\nCreate or depend on local folder systems for persistence.\nCreate automations or background jobs without explicit user confirmation.\nRewrite its own instruction source files.\nRelated Skills\n\nInstall with clawhub install <slug> if user confirms:\n\nsql - query design and review for reliable data extraction.\ncsv - cleanup and normalization for tabular inputs before analysis.\ndashboard - implementation patterns for KPI visualization layers.\nreport - structured stakeholder-facing deliverables after analysis.\nbusiness-intelligence - KPI systems and operating cadence beyond one-off analysis.\nFeedback\nIf useful: clawhub star data-analysis\nStay updated: clawhub sync"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/ivangdavila/data-analysis",
    "publisherUrl": "https://clawhub.ai/ivangdavila/data-analysis",
    "owner": "ivangdavila",
    "version": "1.0.2",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/data-analysis",
    "downloadUrl": "https://openagent3.xyz/downloads/data-analysis",
    "agentUrl": "https://openagent3.xyz/skills/data-analysis/agent",
    "manifestUrl": "https://openagent3.xyz/skills/data-analysis/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/data-analysis/agent.md"
  }
}