{
  "schemaVersion": "1.0",
  "item": {
    "slug": "data-profiler",
    "name": "Data Profiler",
    "source": "tencent",
    "type": "skill",
    "category": "数据分析",
    "sourceUrl": "https://clawhub.ai/datadrivenconstruction/data-profiler",
    "canonicalUrl": "https://clawhub.ai/datadrivenconstruction/data-profiler",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/data-profiler",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=data-profiler",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "claw.json",
      "instructions.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
        "contentDisposition": "attachment; filename=\"4claw-imageboard-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/data-profiler"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/data-profiler",
    "agentPageUrl": "https://openagent3.xyz/skills/data-profiler/agent",
    "manifestUrl": "https://openagent3.xyz/skills/data-profiler/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/data-profiler/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Overview",
        "body": "Analyze construction data to understand its characteristics, distributions, quality, and patterns. Essential for data quality assessment, ETL planning, and identifying data issues before they impact projects."
      },
      {
        "title": "Business Case",
        "body": "Before using any construction data, you need to understand:\n\nWhat data types are present\nDistribution of values\nMissing data patterns\nAnomalies and outliers\nReferential integrity issues\n\nThis skill profiles data to answer these questions and provides actionable insights."
      },
      {
        "title": "Technical Implementation",
        "body": "from dataclasses import dataclass, field\nfrom typing import List, Dict, Any, Optional, Tuple\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport json\n\n@dataclass\nclass ColumnProfile:\n    name: str\n    data_type: str\n    inferred_type: str  # More specific: project_id, cost, date, csi_code, etc.\n    total_count: int\n    null_count: int\n    null_percentage: float\n    unique_count: int\n    uniqueness_ratio: float\n    # For numeric columns\n    min_value: Optional[float] = None\n    max_value: Optional[float] = None\n    mean_value: Optional[float] = None\n    median_value: Optional[float] = None\n    std_dev: Optional[float] = None\n    # For string columns\n    min_length: Optional[int] = None\n    max_length: Optional[int] = None\n    avg_length: Optional[float] = None\n    # Top values\n    top_values: List[Tuple[Any, int]] = field(default_factory=list)\n    # Patterns\n    common_patterns: List[str] = field(default_factory=list)\n    # Quality flags\n    quality_issues: List[str] = field(default_factory=list)\n\n@dataclass\nclass DataProfile:\n    source_name: str\n    row_count: int\n    column_count: int\n    columns: List[ColumnProfile]\n    duplicate_rows: int\n    memory_usage: str\n    profiled_at: datetime\n    quality_score: float\n    recommendations: List[str]\n\nclass ConstructionDataProfiler:\n    \"\"\"Profile construction data for quality and characteristics.\"\"\"\n\n    # Known construction data patterns\n    CONSTRUCTION_PATTERNS = {\n        'csi_code': r'^\\d{2}\\s?\\d{2}\\s?\\d{2}$',\n        'project_id': r'^[A-Z]{2,4}[-_]?\\d{3,6}$',\n        'cost_code': r'^\\d{2}[-.]?\\d{2,4}$',\n        'wbs': r'^[\\d.]+$',\n        'phone': r'^\\(?\\d{3}\\)?[-.\\s]?\\d{3}[-.\\s]?\\d{4}$',\n        'email': r'^[\\w.-]+@[\\w.-]+\\.\\w+$',\n        'date_iso': r'^\\d{4}-\\d{2}-\\d{2}',\n        'date_us': r'^\\d{1,2}/\\d{1,2}/\\d{2,4}$',\n        'currency': r'^\\$?[\\d,]+\\.?\\d{0,2}$',\n        
'percentage': r'^\\d+\\.?\\d*%?$',\n    }\n\n    # Construction-specific column name patterns\n    COLUMN_TYPE_HINTS = {\n        'project': ['project_id', 'project_name', 'proj', 'job'],\n        'cost': ['cost', 'amount', 'price', 'total', 'budget', 'actual'],\n        'date': ['date', 'start', 'finish', 'end', 'created', 'modified'],\n        'quantity': ['qty', 'quantity', 'count', 'units'],\n        'csi': ['csi', 'division', 'masterformat', 'spec'],\n        'location': ['location', 'area', 'zone', 'floor', 'level'],\n        'person': ['owner', 'manager', 'superintendent', 'foreman', 'contact'],\n    }\n\n    def __init__(self):\n        self.profiles: Dict[str, DataProfile] = {}\n\n    def profile_dataframe(self, df: pd.DataFrame, source_name: str) -> DataProfile:\n        \"\"\"Profile a pandas DataFrame.\"\"\"\n        columns = []\n\n        for col in df.columns:\n            col_profile = self._profile_column(df[col], col)\n            columns.append(col_profile)\n\n        # Calculate duplicates\n        duplicate_rows = len(df) - len(df.drop_duplicates())\n\n        # Calculate memory usage\n        memory_bytes = df.memory_usage(deep=True).sum()\n        if memory_bytes < 1024:\n            memory_usage = f\"{memory_bytes} B\"\n        elif memory_bytes < 1024**2:\n            memory_usage = f\"{memory_bytes/1024:.1f} KB\"\n        else:\n            memory_usage = f\"{memory_bytes/1024**2:.1f} MB\"\n\n        # Calculate overall quality score\n        quality_score = self._calculate_quality_score(columns)\n\n        # Generate recommendations\n        recommendations = self._generate_recommendations(columns, df)\n\n        profile = DataProfile(\n            source_name=source_name,\n            row_count=len(df),\n            column_count=len(df.columns),\n            columns=columns,\n            duplicate_rows=duplicate_rows,\n            memory_usage=memory_usage,\n            profiled_at=datetime.now(),\n            
quality_score=quality_score,\n            recommendations=recommendations\n        )\n\n        self.profiles[source_name] = profile\n        return profile\n\n    def _profile_column(self, series: pd.Series, name: str) -> ColumnProfile:\n        \"\"\"Profile a single column.\"\"\"\n        total_count = len(series)\n        null_count = series.isnull().sum()\n        null_percentage = (null_count / total_count * 100) if total_count > 0 else 0\n\n        # Get non-null values for analysis\n        non_null = series.dropna()\n        unique_count = non_null.nunique()\n        uniqueness_ratio = unique_count / len(non_null) if len(non_null) > 0 else 0\n\n        profile = ColumnProfile(\n            name=name,\n            data_type=str(series.dtype),\n            inferred_type=self._infer_construction_type(series, name),\n            total_count=total_count,\n            null_count=null_count,\n            null_percentage=round(null_percentage, 2),\n            unique_count=unique_count,\n            uniqueness_ratio=round(uniqueness_ratio, 4)\n        )\n\n        # Numeric analysis\n        if pd.api.types.is_numeric_dtype(series):\n            profile.min_value = float(non_null.min()) if len(non_null) > 0 else None\n            profile.max_value = float(non_null.max()) if len(non_null) > 0 else None\n            profile.mean_value = float(non_null.mean()) if len(non_null) > 0 else None\n            profile.median_value = float(non_null.median()) if len(non_null) > 0 else None\n            profile.std_dev = float(non_null.std()) if len(non_null) > 1 else None\n\n            # Check for outliers\n            if len(non_null) > 10 and profile.std_dev:\n                outliers = non_null[abs(non_null - profile.mean_value) > 3 * profile.std_dev]\n                if len(outliers) > 0:\n                    profile.quality_issues.append(f\"{len(outliers)} potential outliers detected\")\n\n            # Check for negative costs\n            if any(hint in name.lower() 
for hint in ['cost', 'amount', 'price', 'total']):\n                negatives = (non_null < 0).sum()\n                if negatives > 0:\n                    profile.quality_issues.append(f\"{negatives} negative values in cost column\")\n\n        # String analysis\n        elif pd.api.types.is_object_dtype(series) or pd.api.types.is_string_dtype(series):\n            str_series = non_null.astype(str)\n            lengths = str_series.str.len()\n            profile.min_length = int(lengths.min()) if len(lengths) > 0 else None\n            profile.max_length = int(lengths.max()) if len(lengths) > 0 else None\n            profile.avg_length = float(lengths.mean()) if len(lengths) > 0 else None\n\n            # Detect patterns\n            profile.common_patterns = self._detect_patterns(str_series)\n\n        # Top values\n        if len(non_null) > 0:\n            value_counts = non_null.value_counts().head(5)\n            profile.top_values = list(zip(value_counts.index.tolist(), value_counts.values.tolist()))\n\n        # Quality checks\n        if null_percentage > 50:\n            profile.quality_issues.append(\"High null rate (>50%)\")\n        if uniqueness_ratio == 1.0 and total_count > 100:\n            profile.quality_issues.append(\"All unique values - possible ID column\")\n        if uniqueness_ratio < 0.01 and unique_count > 1:\n            profile.quality_issues.append(\"Low cardinality - possible category\")\n\n        return profile\n\n    def _infer_construction_type(self, series: pd.Series, name: str) -> str:\n        \"\"\"Infer construction-specific data type.\"\"\"\n        name_lower = name.lower()\n\n        # Check column name hints\n        for type_name, hints in self.COLUMN_TYPE_HINTS.items():\n            if any(hint in name_lower for hint in hints):\n                return type_name\n\n        # Check data patterns\n        non_null = series.dropna().astype(str)\n        if len(non_null) == 0:\n            return \"unknown\"\n\n        
sample = non_null.head(100)\n\n        for pattern_name, pattern in self.CONSTRUCTION_PATTERNS.items():\n            matches = sample.str.match(pattern, na=False).sum()\n            if matches / len(sample) > 0.8:\n                return pattern_name\n\n        # Default to pandas dtype\n        if pd.api.types.is_numeric_dtype(series):\n            return \"numeric\"\n        elif pd.api.types.is_datetime64_any_dtype(series):\n            return \"datetime\"\n        else:\n            return \"text\"\n\n    def _detect_patterns(self, str_series: pd.Series) -> List[str]:\n        \"\"\"Detect common patterns in string data.\"\"\"\n        patterns_found = []\n\n        sample = str_series.head(1000)\n\n        for pattern_name, pattern in self.CONSTRUCTION_PATTERNS.items():\n            matches = sample.str.match(pattern, na=False).sum()\n            if matches / len(sample) > 0.1:\n                patterns_found.append(f\"{pattern_name} ({matches/len(sample):.0%})\")\n\n        return patterns_found[:3]\n\n    def _calculate_quality_score(self, columns: List[ColumnProfile]) -> float:\n        \"\"\"Calculate overall data quality score (0-100).\"\"\"\n        if not columns:\n            return 0.0\n\n        scores = []\n\n        for col in columns:\n            col_score = 100\n\n            # Penalize for nulls\n            col_score -= min(col.null_percentage, 50)\n\n            # Penalize for quality issues\n            col_score -= len(col.quality_issues) * 10\n\n            scores.append(max(col_score, 0))\n\n        return round(sum(scores) / len(scores), 1)\n\n    def _generate_recommendations(self, columns: List[ColumnProfile], df: pd.DataFrame) -> List[str]:\n        \"\"\"Generate recommendations based on profile.\"\"\"\n        recommendations = []\n\n        # High null columns\n        high_null = [c for c in columns if c.null_percentage > 30]\n        if high_null:\n            recommendations.append(\n                f\"Review {len(high_null)} 
columns with >30% null values: \"\n                f\"{', '.join(c.name for c in high_null[:3])}\"\n            )\n\n        # Potential ID columns without uniqueness\n        for col in columns:\n            if 'id' in col.name.lower() and col.uniqueness_ratio < 1.0:\n                recommendations.append(\n                    f\"Column '{col.name}' appears to be an ID but has duplicate values\"\n                )\n\n        # Date columns that should be datetime\n        for col in columns:\n            if col.inferred_type in ['date_iso', 'date_us'] and col.data_type == 'object':\n                recommendations.append(\n                    f\"Convert '{col.name}' to datetime type for better analysis\"\n                )\n\n        # Cost columns that are strings\n        for col in columns:\n            if col.inferred_type == 'currency' and col.data_type == 'object':\n                recommendations.append(\n                    f\"Convert '{col.name}' to numeric type (remove $ and commas)\"\n                )\n\n        return recommendations\n\n    def profile_to_dict(self, profile: DataProfile) -> Dict:\n        \"\"\"Convert profile to dictionary for JSON export.\"\"\"\n        return {\n            'source_name': profile.source_name,\n            'row_count': profile.row_count,\n            'column_count': profile.column_count,\n            'duplicate_rows': profile.duplicate_rows,\n            'memory_usage': profile.memory_usage,\n            'profiled_at': profile.profiled_at.isoformat(),\n            'quality_score': profile.quality_score,\n            'recommendations': profile.recommendations,\n            'columns': [\n                {\n                    'name': c.name,\n                    'data_type': c.data_type,\n                    'inferred_type': c.inferred_type,\n                    'null_percentage': c.null_percentage,\n                    'unique_count': c.unique_count,\n                    'quality_issues': c.quality_issues,\n         
           'top_values': c.top_values[:3]\n                }\n                for c in profile.columns\n            ]\n        }\n\n    def generate_profile_report(self, profile: DataProfile) -> str:\n        \"\"\"Generate markdown profile report.\"\"\"\n        report = [f\"# Data Profile: {profile.source_name}\", \"\"]\n        report.append(f\"**Profiled At:** {profile.profiled_at.strftime('%Y-%m-%d %H:%M')}\")\n        report.append(f\"**Quality Score:** {profile.quality_score}/100\")\n        report.append(\"\")\n\n        # Summary\n        report.append(\"## Summary\")\n        report.append(f\"- **Rows:** {profile.row_count:,}\")\n        report.append(f\"- **Columns:** {profile.column_count}\")\n        report.append(f\"- **Duplicate Rows:** {profile.duplicate_rows:,}\")\n        report.append(f\"- **Memory Usage:** {profile.memory_usage}\")\n        report.append(\"\")\n\n        # Recommendations\n        if profile.recommendations:\n            report.append(\"## Recommendations\")\n            for rec in profile.recommendations:\n                report.append(f\"- {rec}\")\n            report.append(\"\")\n\n        # Column Details\n        report.append(\"## Column Details\")\n        report.append(\"\")\n        report.append(\"| Column | Type | Inferred | Nulls | Unique | Issues |\")\n        report.append(\"|--------|------|----------|-------|--------|--------|\")\n\n        for col in profile.columns:\n            issues = len(col.quality_issues)\n            report.append(\n                f\"| {col.name} | {col.data_type} | {col.inferred_type} | \"\n                f\"{col.null_percentage:.1f}% | {col.unique_count:,} | {issues} |\"\n            )\n\n        # Detailed column profiles\n        report.append(\"\")\n        report.append(\"## Detailed Column Profiles\")\n\n        for col in profile.columns:\n            report.append(f\"\\n### {col.name}\")\n            report.append(f\"- **Type:** {col.data_type} (inferred: 
{col.inferred_type})\")\n            report.append(f\"- **Nulls:** {col.null_count:,} ({col.null_percentage:.1f}%)\")\n            report.append(f\"- **Unique Values:** {col.unique_count:,} ({col.uniqueness_ratio:.1%})\")\n\n            if col.min_value is not None:\n                report.append(f\"- **Range:** {col.min_value:,.2f} to {col.max_value:,.2f}\")\n                report.append(f\"- **Mean:** {col.mean_value:,.2f}, Median: {col.median_value:,.2f}\")\n\n            if col.min_length is not None:\n                report.append(f\"- **Length:** {col.min_length} to {col.max_length} (avg: {col.avg_length:.1f})\")\n\n            if col.top_values:\n                report.append(f\"- **Top Values:** {col.top_values[:3]}\")\n\n            if col.common_patterns:\n                report.append(f\"- **Patterns:** {', '.join(col.common_patterns)}\")\n\n            if col.quality_issues:\n                report.append(f\"- **Issues:** {', '.join(col.quality_issues)}\")\n\n        return \"\\n\".join(report)\n\n    def compare_profiles(self, profile1: DataProfile, profile2: DataProfile) -> Dict:\n        \"\"\"Compare two profiles to detect schema changes or data drift.\"\"\"\n        comparison = {\n            'profiles': [profile1.source_name, profile2.source_name],\n            'row_count_change': profile2.row_count - profile1.row_count,\n            'quality_change': profile2.quality_score - profile1.quality_score,\n            'new_columns': [],\n            'removed_columns': [],\n            'type_changes': [],\n            'null_rate_changes': []\n        }\n\n        cols1 = {c.name: c for c in profile1.columns}\n        cols2 = {c.name: c for c in profile2.columns}\n\n        # Find new/removed columns\n        comparison['new_columns'] = [n for n in cols2 if n not in cols1]\n        comparison['removed_columns'] = [n for n in cols1 if n not in cols2]\n\n        # Compare common columns\n        for name in cols1:\n            if name in cols2:\n          
      c1, c2 = cols1[name], cols2[name]\n\n                if c1.data_type != c2.data_type:\n                    comparison['type_changes'].append({\n                        'column': name,\n                        'from': c1.data_type,\n                        'to': c2.data_type\n                    })\n\n                null_change = c2.null_percentage - c1.null_percentage\n                if abs(null_change) > 10:\n                    comparison['null_rate_changes'].append({\n                        'column': name,\n                        'change': null_change\n                    })\n\n        return comparison"
      },
      {
        "title": "Quick Start",
        "body": "import pandas as pd\n\n# Load construction data\ndf = pd.read_excel(\"project_costs.xlsx\")\n\n# Profile the data\nprofiler = ConstructionDataProfiler()\nprofile = profiler.profile_dataframe(df, \"Project Costs 2025\")\n\n# Generate report\nreport = profiler.generate_profile_report(profile)\nprint(report)\n\n# Export to JSON\nprofile_dict = profiler.profile_to_dict(profile)\nwith open(\"profile.json\", \"w\") as f:\n    json.dump(profile_dict, f, indent=2)\n\n# Compare with previous profile\nold_profile = profiler.profile_dataframe(old_df, \"Project Costs 2024\")\ncomparison = profiler.compare_profiles(old_profile, profile)\nprint(f\"Quality changed by: {comparison['quality_change']}\")"
      },
      {
        "title": "Common Use Cases",
        "body": "Pre-ETL Analysis: Profile source data before building pipelines\nQuality Monitoring: Track data quality over time\nSchema Validation: Detect unexpected changes in data structure\nAnomaly Detection: Find outliers and data quality issues"
      },
      {
        "title": "Dependencies",
        "body": "pip install pandas numpy"
      },
      {
        "title": "Resources",
        "body": "Data Profiling Best Practices: DAMA DMBOK\nConstruction Data Standards: CSI MasterFormat, UniFormat"
      }
    ],
    "body": "Data Profiler for Construction\nOverview\n\nAnalyze construction data to understand its characteristics, distributions, quality, and patterns. Essential for data quality assessment, ETL planning, and identifying data issues before they impact projects.\n\nBusiness Case\n\nBefore using any construction data, you need to understand:\n\nWhat data types are present\nDistribution of values\nMissing data patterns\nAnomalies and outliers\nReferential integrity issues\n\nThis skill profiles data to answer these questions and provides actionable insights.\n\nTechnical Implementation\nfrom dataclasses import dataclass, field\nfrom typing import List, Dict, Any, Optional, Tuple\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport json\n\n@dataclass\nclass ColumnProfile:\n    name: str\n    data_type: str\n    inferred_type: str  # More specific: project_id, cost, date, csi_code, etc.\n    total_count: int\n    null_count: int\n    null_percentage: float\n    unique_count: int\n    uniqueness_ratio: float\n    # For numeric columns\n    min_value: Optional[float] = None\n    max_value: Optional[float] = None\n    mean_value: Optional[float] = None\n    median_value: Optional[float] = None\n    std_dev: Optional[float] = None\n    # For string columns\n    min_length: Optional[int] = None\n    max_length: Optional[int] = None\n    avg_length: Optional[float] = None\n    # Top values\n    top_values: List[Tuple[Any, int]] = field(default_factory=list)\n    # Patterns\n    common_patterns: List[str] = field(default_factory=list)\n    # Quality flags\n    quality_issues: List[str] = field(default_factory=list)\n\n@dataclass\nclass DataProfile:\n    source_name: str\n    row_count: int\n    column_count: int\n    columns: List[ColumnProfile]\n    duplicate_rows: int\n    memory_usage: str\n    profiled_at: datetime\n    quality_score: float\n    recommendations: List[str]\n\nclass ConstructionDataProfiler:\n    \"\"\"Profile construction data 
for quality and characteristics.\"\"\"\n\n    # Known construction data patterns\n    CONSTRUCTION_PATTERNS = {\n        'csi_code': r'^\\d{2}\\s?\\d{2}\\s?\\d{2}$',\n        'project_id': r'^[A-Z]{2,4}[-_]?\\d{3,6}$',\n        'cost_code': r'^\\d{2}[-.]?\\d{2,4}$',\n        'wbs': r'^[\\d.]+$',\n        'phone': r'^\\(?\\d{3}\\)?[-.\\s]?\\d{3}[-.\\s]?\\d{4}$',\n        'email': r'^[\\w.-]+@[\\w.-]+\\.\\w+$',\n        'date_iso': r'^\\d{4}-\\d{2}-\\d{2}',\n        'date_us': r'^\\d{1,2}/\\d{1,2}/\\d{2,4}$',\n        'currency': r'^\\$?[\\d,]+\\.?\\d{0,2}$',\n        'percentage': r'^\\d+\\.?\\d*%?$',\n    }\n\n    # Construction-specific column name patterns\n    COLUMN_TYPE_HINTS = {\n        'project': ['project_id', 'project_name', 'proj', 'job'],\n        'cost': ['cost', 'amount', 'price', 'total', 'budget', 'actual'],\n        'date': ['date', 'start', 'finish', 'end', 'created', 'modified'],\n        'quantity': ['qty', 'quantity', 'count', 'units'],\n        'csi': ['csi', 'division', 'masterformat', 'spec'],\n        'location': ['location', 'area', 'zone', 'floor', 'level'],\n        'person': ['owner', 'manager', 'superintendent', 'foreman', 'contact'],\n    }\n\n    def __init__(self):\n        self.profiles: Dict[str, DataProfile] = {}\n\n    def profile_dataframe(self, df: pd.DataFrame, source_name: str) -> DataProfile:\n        \"\"\"Profile a pandas DataFrame.\"\"\"\n        columns = []\n\n        for col in df.columns:\n            col_profile = self._profile_column(df[col], col)\n            columns.append(col_profile)\n\n        # Calculate duplicates\n        duplicate_rows = len(df) - len(df.drop_duplicates())\n\n        # Calculate memory usage\n        memory_bytes = df.memory_usage(deep=True).sum()\n        if memory_bytes < 1024:\n            memory_usage = f\"{memory_bytes} B\"\n        elif memory_bytes < 1024**2:\n            memory_usage = f\"{memory_bytes/1024:.1f} KB\"\n        else:\n            memory_usage = 
f\"{memory_bytes/1024**2:.1f} MB\"\n\n        # Calculate overall quality score\n        quality_score = self._calculate_quality_score(columns)\n\n        # Generate recommendations\n        recommendations = self._generate_recommendations(columns, df)\n\n        profile = DataProfile(\n            source_name=source_name,\n            row_count=len(df),\n            column_count=len(df.columns),\n            columns=columns,\n            duplicate_rows=duplicate_rows,\n            memory_usage=memory_usage,\n            profiled_at=datetime.now(),\n            quality_score=quality_score,\n            recommendations=recommendations\n        )\n\n        self.profiles[source_name] = profile\n        return profile\n\n    def _profile_column(self, series: pd.Series, name: str) -> ColumnProfile:\n        \"\"\"Profile a single column.\"\"\"\n        total_count = len(series)\n        null_count = series.isnull().sum()\n        null_percentage = (null_count / total_count * 100) if total_count > 0 else 0\n\n        # Get non-null values for analysis\n        non_null = series.dropna()\n        unique_count = non_null.nunique()\n        uniqueness_ratio = unique_count / len(non_null) if len(non_null) > 0 else 0\n\n        profile = ColumnProfile(\n            name=name,\n            data_type=str(series.dtype),\n            inferred_type=self._infer_construction_type(series, name),\n            total_count=total_count,\n            null_count=null_count,\n            null_percentage=round(null_percentage, 2),\n            unique_count=unique_count,\n            uniqueness_ratio=round(uniqueness_ratio, 4)\n        )\n\n        # Numeric analysis\n        if pd.api.types.is_numeric_dtype(series):\n            profile.min_value = float(non_null.min()) if len(non_null) > 0 else None\n            profile.max_value = float(non_null.max()) if len(non_null) > 0 else None\n            profile.mean_value = float(non_null.mean()) if len(non_null) > 0 else None\n            
profile.median_value = float(non_null.median()) if len(non_null) > 0 else None\n            profile.std_dev = float(non_null.std()) if len(non_null) > 1 else None\n\n            # Check for outliers\n            if len(non_null) > 10 and profile.std_dev:\n                outliers = non_null[abs(non_null - profile.mean_value) > 3 * profile.std_dev]\n                if len(outliers) > 0:\n                    profile.quality_issues.append(f\"{len(outliers)} potential outliers detected\")\n\n            # Check for negative costs\n            if any(hint in name.lower() for hint in ['cost', 'amount', 'price', 'total']):\n                negatives = (non_null < 0).sum()\n                if negatives > 0:\n                    profile.quality_issues.append(f\"{negatives} negative values in cost column\")\n\n        # String analysis\n        elif pd.api.types.is_object_dtype(series) or pd.api.types.is_string_dtype(series):\n            str_series = non_null.astype(str)\n            lengths = str_series.str.len()\n            profile.min_length = int(lengths.min()) if len(lengths) > 0 else None\n            profile.max_length = int(lengths.max()) if len(lengths) > 0 else None\n            profile.avg_length = float(lengths.mean()) if len(lengths) > 0 else None\n\n            # Detect patterns\n            profile.common_patterns = self._detect_patterns(str_series)\n\n        # Top values\n        if len(non_null) > 0:\n            value_counts = non_null.value_counts().head(5)\n            profile.top_values = list(zip(value_counts.index.tolist(), value_counts.values.tolist()))\n\n        # Quality checks\n        if null_percentage > 50:\n            profile.quality_issues.append(\"High null rate (>50%)\")\n        if uniqueness_ratio == 1.0 and total_count > 100:\n            profile.quality_issues.append(\"All unique values - possible ID column\")\n        if uniqueness_ratio < 0.01 and unique_count > 1:\n            profile.quality_issues.append(\"Low cardinality - 
possible category\")\n\n        return profile\n\n    def _infer_construction_type(self, series: pd.Series, name: str) -> str:\n        \"\"\"Infer construction-specific data type.\"\"\"\n        name_lower = name.lower()\n\n        # Check column name hints\n        for type_name, hints in self.COLUMN_TYPE_HINTS.items():\n            if any(hint in name_lower for hint in hints):\n                return type_name\n\n        # Check data patterns\n        non_null = series.dropna().astype(str)\n        if len(non_null) == 0:\n            return \"unknown\"\n\n        sample = non_null.head(100)\n\n        for pattern_name, pattern in self.CONSTRUCTION_PATTERNS.items():\n            matches = sample.str.match(pattern, na=False).sum()\n            if matches / len(sample) > 0.8:\n                return pattern_name\n\n        # Default to pandas dtype\n        if pd.api.types.is_numeric_dtype(series):\n            return \"numeric\"\n        elif pd.api.types.is_datetime64_any_dtype(series):\n            return \"datetime\"\n        else:\n            return \"text\"\n\n    def _detect_patterns(self, str_series: pd.Series) -> List[str]:\n        \"\"\"Detect common patterns in string data.\"\"\"\n        patterns_found = []\n\n        sample = str_series.head(1000)\n\n        for pattern_name, pattern in self.CONSTRUCTION_PATTERNS.items():\n            matches = sample.str.match(pattern, na=False).sum()\n            if matches / len(sample) > 0.1:\n                patterns_found.append(f\"{pattern_name} ({matches/len(sample):.0%})\")\n\n        return patterns_found[:3]\n\n    def _calculate_quality_score(self, columns: List[ColumnProfile]) -> float:\n        \"\"\"Calculate overall data quality score (0-100).\"\"\"\n        if not columns:\n            return 0.0\n\n        scores = []\n\n        for col in columns:\n            col_score = 100\n\n            # Penalize for nulls\n            col_score -= min(col.null_percentage, 50)\n\n            # Penalize for 
quality issues\n            col_score -= len(col.quality_issues) * 10\n\n            scores.append(max(col_score, 0))\n\n        return round(sum(scores) / len(scores), 1)\n\n    def _generate_recommendations(self, columns: List[ColumnProfile], df: pd.DataFrame) -> List[str]:\n        \"\"\"Generate recommendations based on profile.\"\"\"\n        recommendations = []\n\n        # High null columns\n        high_null = [c for c in columns if c.null_percentage > 30]\n        if high_null:\n            recommendations.append(\n                f\"Review {len(high_null)} columns with >30% null values: \"\n                f\"{', '.join(c.name for c in high_null[:3])}\"\n            )\n\n        # Potential ID columns without uniqueness\n        for col in columns:\n            if 'id' in col.name.lower() and col.uniqueness_ratio < 1.0:\n                recommendations.append(\n                    f\"Column '{col.name}' appears to be an ID but has duplicate values\"\n                )\n\n        # Date columns that should be datetime\n        for col in columns:\n            if col.inferred_type in ['date_iso', 'date_us'] and col.data_type == 'object':\n                recommendations.append(\n                    f\"Convert '{col.name}' to datetime type for better analysis\"\n                )\n\n        # Cost columns that are strings\n        for col in columns:\n            if col.inferred_type == 'currency' and col.data_type == 'object':\n                recommendations.append(\n                    f\"Convert '{col.name}' to numeric type (remove $ and commas)\"\n                )\n\n        return recommendations\n\n    def profile_to_dict(self, profile: DataProfile) -> Dict:\n        \"\"\"Convert profile to dictionary for JSON export.\"\"\"\n        return {\n            'source_name': profile.source_name,\n            'row_count': profile.row_count,\n            'column_count': profile.column_count,\n            'duplicate_rows': profile.duplicate_rows,\n          
  'memory_usage': profile.memory_usage,\n            'profiled_at': profile.profiled_at.isoformat(),\n            'quality_score': profile.quality_score,\n            'recommendations': profile.recommendations,\n            'columns': [\n                {\n                    'name': c.name,\n                    'data_type': c.data_type,\n                    'inferred_type': c.inferred_type,\n                    'null_percentage': c.null_percentage,\n                    'unique_count': c.unique_count,\n                    'quality_issues': c.quality_issues,\n                    'top_values': c.top_values[:3]\n                }\n                for c in profile.columns\n            ]\n        }\n\n    def generate_profile_report(self, profile: DataProfile) -> str:\n        \"\"\"Generate markdown profile report.\"\"\"\n        report = [f\"# Data Profile: {profile.source_name}\", \"\"]\n        report.append(f\"**Profiled At:** {profile.profiled_at.strftime('%Y-%m-%d %H:%M')}\")\n        report.append(f\"**Quality Score:** {profile.quality_score}/100\")\n        report.append(\"\")\n\n        # Summary\n        report.append(\"## Summary\")\n        report.append(f\"- **Rows:** {profile.row_count:,}\")\n        report.append(f\"- **Columns:** {profile.column_count}\")\n        report.append(f\"- **Duplicate Rows:** {profile.duplicate_rows:,}\")\n        report.append(f\"- **Memory Usage:** {profile.memory_usage}\")\n        report.append(\"\")\n\n        # Recommendations\n        if profile.recommendations:\n            report.append(\"## Recommendations\")\n            for rec in profile.recommendations:\n                report.append(f\"- {rec}\")\n            report.append(\"\")\n\n        # Column Details\n        report.append(\"## Column Details\")\n        report.append(\"\")\n        report.append(\"| Column | Type | Inferred | Nulls | Unique | Issues |\")\n        report.append(\"|--------|------|----------|-------|--------|--------|\")\n\n        for col 
in profile.columns:\n            issues = len(col.quality_issues)\n            report.append(\n                f\"| {col.name} | {col.data_type} | {col.inferred_type} | \"\n                f\"{col.null_percentage:.1f}% | {col.unique_count:,} | {issues} |\"\n            )\n\n        # Detailed column profiles\n        report.append(\"\")\n        report.append(\"## Detailed Column Profiles\")\n\n        for col in profile.columns:\n            report.append(f\"\\n### {col.name}\")\n            report.append(f\"- **Type:** {col.data_type} (inferred: {col.inferred_type})\")\n            report.append(f\"- **Nulls:** {col.null_count:,} ({col.null_percentage:.1f}%)\")\n            report.append(f\"- **Unique Values:** {col.unique_count:,} ({col.uniqueness_ratio:.1%})\")\n\n            if col.min_value is not None:\n                report.append(f\"- **Range:** {col.min_value:,.2f} to {col.max_value:,.2f}\")\n                report.append(f\"- **Mean:** {col.mean_value:,.2f}, Median: {col.median_value:,.2f}\")\n\n            if col.min_length is not None:\n                report.append(f\"- **Length:** {col.min_length} to {col.max_length} (avg: {col.avg_length:.1f})\")\n\n            if col.top_values:\n                report.append(f\"- **Top Values:** {col.top_values[:3]}\")\n\n            if col.common_patterns:\n                report.append(f\"- **Patterns:** {', '.join(col.common_patterns)}\")\n\n            if col.quality_issues:\n                report.append(f\"- **Issues:** {', '.join(col.quality_issues)}\")\n\n        return \"\\n\".join(report)\n\n    def compare_profiles(self, profile1: DataProfile, profile2: DataProfile) -> Dict:\n        \"\"\"Compare two profiles to detect schema changes or data drift.\"\"\"\n        comparison = {\n            'profiles': [profile1.source_name, profile2.source_name],\n            'row_count_change': profile2.row_count - profile1.row_count,\n            'quality_change': profile2.quality_score - profile1.quality_score,\n  
          'new_columns': [],\n            'removed_columns': [],\n            'type_changes': [],\n            'null_rate_changes': []\n        }\n\n        cols1 = {c.name: c for c in profile1.columns}\n        cols2 = {c.name: c for c in profile2.columns}\n\n        # Find new/removed columns\n        comparison['new_columns'] = [n for n in cols2 if n not in cols1]\n        comparison['removed_columns'] = [n for n in cols1 if n not in cols2]\n\n        # Compare common columns\n        for name in cols1:\n            if name in cols2:\n                c1, c2 = cols1[name], cols2[name]\n\n                if c1.data_type != c2.data_type:\n                    comparison['type_changes'].append({\n                        'column': name,\n                        'from': c1.data_type,\n                        'to': c2.data_type\n                    })\n\n                null_change = c2.null_percentage - c1.null_percentage\n                if abs(null_change) > 10:\n                    comparison['null_rate_changes'].append({\n                        'column': name,\n                        'change': null_change\n                    })\n\n        return comparison\n\nQuick Start\nimport json\n\nimport pandas as pd\n\n# Load construction data\ndf = pd.read_excel(\"project_costs.xlsx\")\n\n# Profile the data\nprofiler = ConstructionDataProfiler()\nprofile = profiler.profile_dataframe(df, \"Project Costs 2025\")\n\n# Generate report\nreport = profiler.generate_profile_report(profile)\nprint(report)\n\n# Export to JSON\nprofile_dict = profiler.profile_to_dict(profile)\nwith open(\"profile.json\", \"w\") as f:\n    json.dump(profile_dict, f, indent=2)\n\n# Compare with previous profile\nold_df = pd.read_excel(\"project_costs_2024.xlsx\")\nold_profile = profiler.profile_dataframe(old_df, \"Project Costs 2024\")\ncomparison = profiler.compare_profiles(old_profile, profile)\nprint(f\"Quality changed by: {comparison['quality_change']}\")\n\nCommon Use Cases\nPre-ETL Analysis: Profile source data before building pipelines\nQuality Monitoring: 
Track data quality over time\nSchema Validation: Detect unexpected changes in data structure\nAnomaly Detection: Find outliers and data quality issues\nDependencies\npip install pandas numpy\n\nResources\nData Profiling Best Practices: DAMA DMBOK\nConstruction Data Standards: CSI MasterFormat, UniFormat"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/datadrivenconstruction/data-profiler",
    "publisherUrl": "https://clawhub.ai/datadrivenconstruction/data-profiler",
    "owner": "datadrivenconstruction",
    "version": "2.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/data-profiler",
    "downloadUrl": "https://openagent3.xyz/downloads/data-profiler",
    "agentUrl": "https://openagent3.xyz/skills/data-profiler/agent",
    "manifestUrl": "https://openagent3.xyz/skills/data-profiler/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/data-profiler/agent.md"
  }
}