{
  "schemaVersion": "1.0",
  "item": {
    "slug": "csv-pipeline",
    "name": "CSV Data Pipeline",
    "source": "tencent",
    "type": "skill",
    "category": "数据分析",
    "sourceUrl": "https://clawhub.ai/gitgoodordietrying/csv-pipeline",
    "canonicalUrl": "https://clawhub.ai/gitgoodordietrying/csv-pipeline",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/csv-pipeline",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=csv-pipeline",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
        "contentDisposition": "attachment; filename=\"4claw-imageboard-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/csv-pipeline"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/csv-pipeline",
    "agentPageUrl": "https://openagent3.xyz/skills/csv-pipeline/agent",
    "manifestUrl": "https://openagent3.xyz/skills/csv-pipeline/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/csv-pipeline/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "CSV Data Pipeline",
        "body": "Process tabular data (CSV, TSV, JSON, JSON Lines) using standard command-line tools and Python. No external dependencies required beyond Python 3."
      },
      {
        "title": "When to Use",
        "body": "User provides a CSV/TSV/JSON file and asks to analyze, transform, or report on it\nJoining, filtering, grouping, or aggregating tabular data\nConverting between formats (CSV to JSON, JSON to CSV, etc.)\nDeduplicating, sorting, or cleaning messy data\nGenerating summary statistics or reports\nETL workflows: extract from one format, transform, load into another"
      },
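      {
        "title": "End-to-End Example",
        "body": "A minimal sketch of the ETL flow above, combining the read_csv, write_csv, and aggregate helpers defined in the sections below. The input file sales.csv and its region/amount columns are hypothetical.\n\n# Extract\nrows = read_csv('sales.csv')\n\n# Transform: drop rows with empty amounts, then sum by region\nrows = [r for r in rows if r['amount'].strip()]\nsummary = aggregate(rows, 'region', 'amount', 'sum')\n\n# Load\nwrite_csv(summary, 'sales_by_region.csv')"
      },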
      {
        "title": "Inspect",
        "body": "# Preview first rows\nhead -5 data.csv\n\n# Count rows (excluding header)\ntail -n +2 data.csv | wc -l\n\n# Show column headers\nhead -1 data.csv\n\n# Count unique values in a column (column 3)\ntail -n +2 data.csv | cut -d',' -f3 | sort -u | wc -l"
      },
      {
        "title": "Filter with awk",
        "body": "# Filter rows where column 3 > 100\nawk -F',' 'NR==1 || $3 > 100' data.csv > filtered.csv\n\n# Filter rows matching a pattern in column 2\nawk -F',' 'NR==1 || $2 ~ /pattern/' data.csv > matched.csv\n\n# Sum column 4\nawk -F',' 'NR>1 {sum += $4} END {print sum}' data.csv"
      },
      {
        "title": "Sort and Deduplicate",
        "body": "# Sort by column 2 (numeric)\nhead -1 data.csv > sorted.csv && tail -n +2 data.csv | sort -t',' -k2 -n >> sorted.csv\n\n# Deduplicate by all columns\nhead -1 data.csv > deduped.csv && tail -n +2 data.csv | sort -u >> deduped.csv\n\n# Deduplicate by specific column (keep first occurrence)\nawk -F',' '!seen[$2]++' data.csv > deduped.csv"
      },
      {
        "title": "Read and Inspect",
        "body": "import csv, json, sys\nfrom collections import Counter\n\ndef read_csv(path, delimiter=','):\n    \"\"\"Read CSV/TSV into list of dicts.\"\"\"\n    with open(path, newline='', encoding='utf-8') as f:\n        return list(csv.DictReader(f, delimiter=delimiter))\n\ndef write_csv(rows, path, delimiter=','):\n    \"\"\"Write list of dicts to CSV.\"\"\"\n    if not rows:\n        return\n    with open(path, 'w', newline='', encoding='utf-8') as f:\n        writer = csv.DictWriter(f, fieldnames=rows[0].keys(), delimiter=delimiter)\n        writer.writeheader()\n        writer.writerows(rows)\n\n# Quick stats\ndata = read_csv('data.csv')\nprint(f\"Rows: {len(data)}\")\nprint(f\"Columns: {list(data[0].keys())}\")\nfor col in data[0]:\n    non_empty = sum(1 for r in data if r[col].strip())\n    print(f\"  {col}: {non_empty}/{len(data)} non-empty\")"
      },
      {
        "title": "Filter and Transform",
        "body": "# Filter rows\nfiltered = [r for r in data if float(r['amount']) > 100]\n\n# Add computed column\nfor r in data:\n    r['total'] = str(float(r['price']) * int(r['quantity']))\n\n# Rename columns\nrenamed = [{('new_name' if k == 'old_name' else k): v for k, v in r.items()} for r in data]\n\n# Type conversion\nfor r in data:\n    r['amount'] = float(r['amount'])\n    r['date'] = r['date'].strip()"
      },
      {
        "title": "Group and Aggregate",
        "body": "from collections import defaultdict\n\ndef group_by(rows, key):\n    \"\"\"Group rows by a column value.\"\"\"\n    groups = defaultdict(list)\n    for r in rows:\n        groups[r[key]].append(r)\n    return dict(groups)\n\ndef aggregate(rows, group_col, agg_col, func='sum'):\n    \"\"\"Aggregate a column by groups.\"\"\"\n    groups = group_by(rows, group_col)\n    results = []\n    for name, group in sorted(groups.items()):\n        values = [float(r[agg_col]) for r in group if r[agg_col].strip()]\n        if func == 'sum':\n            agg = sum(values)\n        elif func == 'avg':\n            agg = sum(values) / len(values) if values else 0\n        elif func == 'count':\n            agg = len(values)\n        elif func == 'min':\n            agg = min(values) if values else 0\n        elif func == 'max':\n            agg = max(values) if values else 0\n        results.append({group_col: name, f'{func}_{agg_col}': str(agg), 'count': str(len(group))})\n    return results\n\n# Example: sum revenue by category\nsummary = aggregate(data, 'category', 'revenue', 'sum')\nwrite_csv(summary, 'summary.csv')"
      },
      {
        "title": "Join Datasets",
        "body": "def inner_join(left, right, on):\n    \"\"\"Inner join two datasets on a key column.\"\"\"\n    right_index = {}\n    for r in right:\n        key = r[on]\n        if key not in right_index:\n            right_index[key] = []\n        right_index[key].append(r)\n\n    results = []\n    for lr in left:\n        key = lr[on]\n        if key in right_index:\n            for rr in right_index[key]:\n                merged = {**lr}\n                for k, v in rr.items():\n                    if k != on:\n                        merged[k] = v\n                results.append(merged)\n    return results\n\ndef left_join(left, right, on):\n    \"\"\"Left join: keep all left rows, fill missing right with empty.\"\"\"\n    right_index = {}\n    right_cols = set()\n    for r in right:\n        key = r[on]\n        right_cols.update(r.keys())\n        if key not in right_index:\n            right_index[key] = []\n        right_index[key].append(r)\n    right_cols.discard(on)\n\n    results = []\n    for lr in left:\n        key = lr[on]\n        if key in right_index:\n            for rr in right_index[key]:\n                merged = {**lr}\n                for k, v in rr.items():\n                    if k != on:\n                        merged[k] = v\n                results.append(merged)\n        else:\n            merged = {**lr}\n            for col in right_cols:\n                merged[col] = ''\n            results.append(merged)\n    return results\n\n# Example\norders = read_csv('orders.csv')\ncustomers = read_csv('customers.csv')\njoined = left_join(orders, customers, on='customer_id')\nwrite_csv(joined, 'orders_with_customers.csv')"
      },
      {
        "title": "Deduplicate",
        "body": "def deduplicate(rows, key_cols=None):\n    \"\"\"Remove duplicate rows. If key_cols specified, dedupe by those columns only.\"\"\"\n    seen = set()\n    unique = []\n    for r in rows:\n        if key_cols:\n            key = tuple(r[c] for c in key_cols)\n        else:\n            key = tuple(sorted(r.items()))\n        if key not in seen:\n            seen.add(key)\n            unique.append(r)\n    return unique\n\n# Deduplicate by email column\nclean = deduplicate(data, key_cols=['email'])"
      },
      {
        "title": "CSV to JSON",
        "body": "import json, csv\n\nwith open('data.csv', newline='', encoding='utf-8') as f:\n    rows = list(csv.DictReader(f))\n\n# Array of objects\nwith open('data.json', 'w') as f:\n    json.dump(rows, f, indent=2)\n\n# JSON Lines (one object per line, streamable)\nwith open('data.jsonl', 'w') as f:\n    for row in rows:\n        f.write(json.dumps(row) + '\\n')"
      },
      {
        "title": "JSON to CSV",
        "body": "import json, csv\n\nwith open('data.json') as f:\n    rows = json.load(f)\n\nwith open('data.csv', 'w', newline='', encoding='utf-8') as f:\n    writer = csv.DictWriter(f, fieldnames=rows[0].keys())\n    writer.writeheader()\n    writer.writerows(rows)"
      },
      {
        "title": "JSON Lines to CSV",
        "body": "import json, csv\n\nrows = []\nwith open('data.jsonl') as f:\n    for line in f:\n        if line.strip():\n            rows.append(json.loads(line))\n\nwith open('data.csv', 'w', newline='', encoding='utf-8') as f:\n    all_keys = set()\n    for r in rows:\n        all_keys.update(r.keys())\n    writer = csv.DictWriter(f, fieldnames=sorted(all_keys))\n    writer.writeheader()\n    writer.writerows(rows)"
      },
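      {
        "title": "Nested JSON to CSV",
        "body": "The JSON-to-CSV snippets above assume flat objects; nested values end up crammed into a single cell. One common workaround is to flatten nested dicts into dotted column names first. A sketch (flatten is a hypothetical helper, not part of this package):\n\ndef flatten(obj, prefix=''):\n    \"\"\"Flatten nested dicts: {'a': {'b': 1}} -> {'a.b': 1}.\"\"\"\n    flat = {}\n    for k, v in obj.items():\n        key = f'{prefix}{k}'\n        if isinstance(v, dict):\n            flat.update(flatten(v, f'{key}.'))\n        else:\n            flat[key] = v\n    return flat\n\n# Flatten each record before handing it to csv.DictWriter\nflat_rows = [flatten(r) for r in rows]"
      },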
      {
        "title": "TSV to CSV",
        "body": "tr '\\t' ',' < data.tsv > data.csv"
      },
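      {
        "title": "TSV to CSV (quoting-aware)",
        "body": "A sketch of a quoting-aware conversion with Python's csv module, reusing the data.tsv/data.csv names from above; it handles fields containing commas, tabs, or quotes.\n\nimport csv\n\nwith open('data.tsv', newline='', encoding='utf-8') as fin, \\\n     open('data.csv', 'w', newline='', encoding='utf-8') as fout:\n    reader = csv.reader(fin, delimiter='\\t')\n    writer = csv.writer(fout)\n    writer.writerows(reader)"
      },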
      {
        "title": "Fix common CSV issues",
        "body": "def clean_csv(rows):\n    \"\"\"Clean common CSV data quality issues.\"\"\"\n    cleaned = []\n    for r in rows:\n        clean_row = {}\n        for k, v in r.items():\n            # Strip whitespace from keys and values\n            k = k.strip()\n            v = v.strip() if isinstance(v, str) else v\n            # Normalize empty values\n            if v in ('', 'N/A', 'n/a', 'NA', 'null', 'NULL', 'None', '-'):\n                v = ''\n            # Normalize boolean values\n            if v.lower() in ('true', 'yes', '1', 'y'):\n                v = 'true'\n            elif v.lower() in ('false', 'no', '0', 'n'):\n                v = 'false'\n            clean_row[k] = v\n        cleaned.append(clean_row)\n    return cleaned"
      },
      {
        "title": "Validate data types",
        "body": "def validate_rows(rows, schema):\n    \"\"\"\n    Validate rows against a schema.\n    schema: dict of column_name -> 'int'|'float'|'date'|'email'|'str'\n    Returns (valid_rows, error_rows)\n    \"\"\"\n    import re\n    valid, errors = [], []\n    for i, r in enumerate(rows):\n        errs = []\n        for col, dtype in schema.items():\n            val = r.get(col, '').strip()\n            if not val:\n                continue\n            if dtype == 'int':\n                try:\n                    int(val)\n                except ValueError:\n                    errs.append(f\"{col}: '{val}' not int\")\n            elif dtype == 'float':\n                try:\n                    float(val)\n                except ValueError:\n                    errs.append(f\"{col}: '{val}' not float\")\n            elif dtype == 'email':\n                if not re.match(r'^[^@]+@[^@]+\\.[^@]+$', val):\n                    errs.append(f\"{col}: '{val}' not email\")\n            elif dtype == 'date':\n                if not re.match(r'^\\d{4}-\\d{2}-\\d{2}', val):\n                    errs.append(f\"{col}: '{val}' not YYYY-MM-DD\")\n        if errs:\n            errors.append({'row': i + 2, 'errors': errs, 'data': r})\n        else:\n            valid.append(r)\n    return valid, errors\n\n# Usage\nvalid, bad = validate_rows(data, {'amount': 'float', 'email': 'email', 'date': 'date'})\nprint(f\"Valid: {len(valid)}, Errors: {len(bad)}\")\nfor e in bad[:5]:\n    print(f\"  Row {e['row']}: {e['errors']}\")"
      },
      {
        "title": "Summary report as Markdown",
        "body": "def generate_report(data, title, group_col, value_col):\n    \"\"\"Generate a Markdown summary report.\"\"\"\n    lines = [f\"# {title}\", f\"\", f\"**Total rows**: {len(data)}\", \"\"]\n\n    # Group summary\n    groups = group_by(data, group_col)\n    lines.append(f\"## By {group_col}\")\n    lines.append(\"\")\n    lines.append(f\"| {group_col} | Count | Sum | Avg | Min | Max |\")\n    lines.append(\"|---|---|---|---|---|---|\")\n\n    for name in sorted(groups):\n        vals = [float(r[value_col]) for r in groups[name] if r[value_col].strip()]\n        if vals:\n            lines.append(f\"| {name} | {len(vals)} | {sum(vals):.2f} | {sum(vals)/len(vals):.2f} | {min(vals):.2f} | {max(vals):.2f} |\")\n\n    lines.append(\"\")\n    lines.append(f\"*Generated from {len(data)} rows*\")\n    return '\\n'.join(lines)\n\nreport = generate_report(data, \"Sales Summary\", \"category\", \"revenue\")\nwith open('report.md', 'w') as f:\n    f.write(report)"
      },
      {
        "title": "Large File Handling",
        "body": "For files too large to load into memory at once:\n\ndef stream_process(input_path, output_path, transform_fn, delimiter=','):\n    \"\"\"Process a CSV row-by-row without loading entire file.\"\"\"\n    with open(input_path, newline='', encoding='utf-8') as fin, \\\n         open(output_path, 'w', newline='', encoding='utf-8') as fout:\n        reader = csv.DictReader(fin, delimiter=delimiter)\n        writer = None\n        for row in reader:\n            result = transform_fn(row)\n            if result is None:\n                continue  # Skip row\n            if writer is None:\n                writer = csv.DictWriter(fout, fieldnames=result.keys(), delimiter=delimiter)\n                writer.writeheader()\n            writer.writerow(result)\n\n# Example: filter and transform in streaming fashion\ndef process_row(row):\n    if float(row.get('amount', 0) or 0) < 10:\n        return None  # Skip small amounts\n    row['amount_usd'] = str(float(row['amount']) * 1.0)  # Add computed field\n    return row\n\nstream_process('big_file.csv', 'output.csv', process_row)"
      },
      {
        "title": "Tips",
        "body": "Always check encoding: file -i data.csv or open with encoding='utf-8-sig' for BOM files\nFor Excel exports with commas in values, the CSV module handles quoting automatically\nUse json.dumps(ensure_ascii=False) for international characters\nPipe-delimited files: use delimiter='|' in csv.reader/writer\nFor very large aggregations, consider sqlite3 which Python includes:\nsqlite3 :memory: \".mode csv\" \".import data.csv t\" \"SELECT category, SUM(amount) FROM t GROUP BY category;\""
      }
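      {
        "title": "SQLite Aggregation from Python",
        "body": "A sketch of the sqlite3 tip above using only the standard library; the data.csv file and the category/amount columns are illustrative, and CSV headers are assumed to be valid SQL identifiers.\n\nimport csv, sqlite3\n\n# Load data.csv into an in-memory SQLite table\nwith open('data.csv', newline='', encoding='utf-8') as f:\n    rows = list(csv.DictReader(f))\n\ncols = list(rows[0].keys())\nconn = sqlite3.connect(':memory:')\nconn.execute(f\"CREATE TABLE t ({', '.join(cols)})\")\nplaceholders = ', '.join('?' for _ in cols)\nconn.executemany(f\"INSERT INTO t VALUES ({placeholders})\",\n                 [tuple(r[c] for c in cols) for r in rows])\n\n# Aggregate with SQL instead of building group dicts in Python\nfor category, total in conn.execute(\n        'SELECT category, SUM(amount) FROM t GROUP BY category'):\n    print(category, total)\nconn.close()"
      }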
    ]
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/gitgoodordietrying/csv-pipeline",
    "publisherUrl": "https://clawhub.ai/gitgoodordietrying/csv-pipeline",
    "owner": "gitgoodordietrying",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/csv-pipeline",
    "downloadUrl": "https://openagent3.xyz/downloads/csv-pipeline",
    "agentUrl": "https://openagent3.xyz/skills/csv-pipeline/agent",
    "manifestUrl": "https://openagent3.xyz/skills/csv-pipeline/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/csv-pipeline/agent.md"
  }
}