{
  "schemaVersion": "1.0",
  "item": {
    "slug": "erp-integration-analysis",
    "name": "ERP Integration Analysis",
    "source": "tencent",
    "type": "skill",
    "category": "数据分析",
    "sourceUrl": "https://clawhub.ai/datadrivenconstruction/erp-integration-analysis",
    "canonicalUrl": "https://clawhub.ai/datadrivenconstruction/erp-integration-analysis",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/erp-integration-analysis",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=erp-integration-analysis",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "claw.json",
      "instructions.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=erp-integration-analysis",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=erp-integration-analysis",
        "contentDisposition": "attachment; filename=\"erp-integration-analysis-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/erp-integration-analysis"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/erp-integration-analysis",
    "agentPageUrl": "https://openagent3.xyz/skills/erp-integration-analysis/agent",
    "manifestUrl": "https://openagent3.xyz/skills/erp-integration-analysis/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/erp-integration-analysis/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Overview",
        "body": "Based on DDC methodology (Chapter 1.2), this skill analyzes ERP system integration patterns in construction organizations, mapping data flows between modules and identifying optimization opportunities.\n\nBook Reference: \"Технологии и системы управления в современном строительстве\" / \"Technologies and Management Systems in Modern Construction\""
      },
      {
        "title": "Quick Start",
        "body": "from dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import List, Dict, Optional, Set, Tuple\nfrom datetime import datetime\nimport json\n\nclass ERPModule(Enum):\n    \"\"\"Common ERP modules in construction\"\"\"\n    FINANCE = \"finance\"\n    PROJECT_MANAGEMENT = \"project_management\"\n    PROCUREMENT = \"procurement\"\n    INVENTORY = \"inventory\"\n    HR = \"human_resources\"\n    PAYROLL = \"payroll\"\n    EQUIPMENT = \"equipment\"\n    SUBCONTRACTS = \"subcontracts\"\n    BILLING = \"billing\"\n    COST_CONTROL = \"cost_control\"\n    DOCUMENT_MANAGEMENT = \"document_management\"\n    REPORTING = \"reporting\"\n\nclass IntegrationMethod(Enum):\n    \"\"\"Types of integration methods\"\"\"\n    API = \"api\"\n    DATABASE = \"database\"\n    FILE_EXPORT = \"file_export\"\n    MANUAL = \"manual\"\n    WEBHOOK = \"webhook\"\n    MESSAGE_QUEUE = \"message_queue\"\n    ETL = \"etl\"\n\nclass DataFlowDirection(Enum):\n    \"\"\"Direction of data flow\"\"\"\n    INBOUND = \"inbound\"\n    OUTBOUND = \"outbound\"\n    BIDIRECTIONAL = \"bidirectional\"\n\n@dataclass\nclass DataFlow:\n    \"\"\"Represents a data flow between systems/modules\"\"\"\n    source_module: str\n    target_module: str\n    data_type: str\n    frequency: str  # real-time, hourly, daily, weekly, manual\n    method: IntegrationMethod\n    direction: DataFlowDirection\n    volume: str  # low, medium, high\n    critical: bool = False\n    issues: List[str] = field(default_factory=list)\n\n@dataclass\nclass ERPSystem:\n    \"\"\"ERP system definition\"\"\"\n    name: str\n    vendor: str\n    version: str\n    modules: List[ERPModule]\n    database: str\n    has_api: bool\n    api_type: Optional[str] = None  # REST, SOAP, GraphQL\n    custom_modules: List[str] = field(default_factory=list)\n\n@dataclass\nclass IntegrationPoint:\n    \"\"\"Integration point between systems\"\"\"\n    id: str\n    source_system: str\n    target_system: str\n    method: 
IntegrationMethod\n    endpoint: Optional[str] = None\n    authentication: Optional[str] = None\n    data_format: str = \"json\"\n    status: str = \"active\"\n    reliability_score: float = 1.0\n    last_sync: Optional[datetime] = None\n\n@dataclass\nclass IntegrationAnalysis:\n    \"\"\"Complete integration analysis results\"\"\"\n    erp_system: ERPSystem\n    external_systems: List[str]\n    data_flows: List[DataFlow]\n    integration_points: List[IntegrationPoint]\n    integration_score: float\n    bottlenecks: List[str]\n    recommendations: List[str]\n    data_flow_diagram: Dict\n\n\nclass ERPIntegrationAnalyzer:\n    \"\"\"\n    Analyze ERP system integration for construction data flows.\n    Based on DDC methodology Chapter 1.2.\n    \"\"\"\n\n    def __init__(self):\n        self.module_dependencies = self._define_module_dependencies()\n        self.critical_flows = self._define_critical_flows()\n\n    def _define_module_dependencies(self) -> Dict[ERPModule, List[ERPModule]]:\n        \"\"\"Define typical module dependencies\"\"\"\n        return {\n            ERPModule.PROJECT_MANAGEMENT: [\n                ERPModule.COST_CONTROL,\n                ERPModule.PROCUREMENT,\n                ERPModule.HR,\n                ERPModule.DOCUMENT_MANAGEMENT\n            ],\n            ERPModule.COST_CONTROL: [\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.BILLING\n            ],\n            ERPModule.PROCUREMENT: [\n                ERPModule.INVENTORY,\n                ERPModule.FINANCE,\n                ERPModule.SUBCONTRACTS\n            ],\n            ERPModule.BILLING: [\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.COST_CONTROL\n            ],\n            ERPModule.PAYROLL: [\n                ERPModule.HR,\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT\n            ],\n            
ERPModule.INVENTORY: [\n                ERPModule.PROCUREMENT,\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.FINANCE\n            ],\n            ERPModule.EQUIPMENT: [\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.FINANCE,\n                ERPModule.INVENTORY\n            ],\n            ERPModule.SUBCONTRACTS: [\n                ERPModule.PROCUREMENT,\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT\n            ]\n        }\n\n    def _define_critical_flows(self) -> List[Tuple[str, str]]:\n        \"\"\"Define business-critical data flows\"\"\"\n        return [\n            (\"project_management\", \"cost_control\"),\n            (\"cost_control\", \"finance\"),\n            (\"procurement\", \"inventory\"),\n            (\"billing\", \"finance\"),\n            (\"hr\", \"payroll\"),\n            (\"project_management\", \"billing\")\n        ]\n\n    def analyze_erp_integration(\n        self,\n        erp_system: ERPSystem,\n        external_systems: List[Dict],\n        integration_points: List[IntegrationPoint],\n        transaction_logs: Optional[List[Dict]] = None\n    ) -> IntegrationAnalysis:\n        \"\"\"\n        Perform comprehensive ERP integration analysis.\n\n        Args:\n            erp_system: The ERP system to analyze\n            external_systems: List of external systems\n            integration_points: Defined integration points\n            transaction_logs: Optional transaction logs for analysis\n\n        Returns:\n            Complete integration analysis\n        \"\"\"\n        # Map all data flows\n        data_flows = self._map_data_flows(\n            erp_system, integration_points, transaction_logs\n        )\n\n        # Calculate integration score\n        integration_score = self._calculate_integration_score(\n            erp_system, data_flows, integration_points\n        )\n\n        # Identify bottlenecks\n        bottlenecks = 
self._identify_bottlenecks(\n            data_flows, integration_points\n        )\n\n        # Generate recommendations\n        recommendations = self._generate_recommendations(\n            erp_system, data_flows, bottlenecks\n        )\n\n        # Create data flow diagram\n        diagram = self._create_flow_diagram(\n            erp_system, external_systems, data_flows\n        )\n\n        return IntegrationAnalysis(\n            erp_system=erp_system,\n            external_systems=[s[\"name\"] for s in external_systems],\n            data_flows=data_flows,\n            integration_points=integration_points,\n            integration_score=integration_score,\n            bottlenecks=bottlenecks,\n            recommendations=recommendations,\n            data_flow_diagram=diagram\n        )\n\n    def _map_data_flows(\n        self,\n        erp: ERPSystem,\n        integration_points: List[IntegrationPoint],\n        logs: Optional[List[Dict]]\n    ) -> List[DataFlow]:\n        \"\"\"Map all data flows in the system\"\"\"\n        flows = []\n\n        # Internal module flows\n        for module in erp.modules:\n            dependencies = self.module_dependencies.get(module, [])\n            for dep in dependencies:\n                if dep in erp.modules:\n                    is_critical = (module.value, dep.value) in self.critical_flows\n                    flows.append(DataFlow(\n                        source_module=module.value,\n                        target_module=dep.value,\n                        data_type=self._get_data_type(module, dep),\n                        frequency=\"real-time\",\n                        method=IntegrationMethod.DATABASE,\n                        direction=DataFlowDirection.BIDIRECTIONAL,\n                        volume=\"high\" if is_critical else \"medium\",\n                        critical=is_critical\n                    ))\n\n        # External integration flows\n        for point in integration_points:\n            
if point.source_system == erp.name or point.target_system == erp.name:\n                flows.append(DataFlow(\n                    source_module=point.source_system,\n                    target_module=point.target_system,\n                    data_type=\"mixed\",\n                    frequency=self._infer_frequency(point),\n                    method=point.method,\n                    direction=DataFlowDirection.BIDIRECTIONAL,\n                    volume=\"medium\",\n                    critical=False\n                ))\n\n        # Analyze logs if available\n        if logs:\n            flows = self._enhance_flows_from_logs(flows, logs)\n\n        return flows\n\n    def _get_data_type(\n        self, source: ERPModule, target: ERPModule\n    ) -> str:\n        \"\"\"Determine data type for module pair\"\"\"\n        data_types = {\n            (ERPModule.PROJECT_MANAGEMENT, ERPModule.COST_CONTROL): \"costs_budgets\",\n            (ERPModule.COST_CONTROL, ERPModule.FINANCE): \"financial_transactions\",\n            (ERPModule.PROCUREMENT, ERPModule.INVENTORY): \"purchase_orders\",\n            (ERPModule.HR, ERPModule.PAYROLL): \"employee_time\",\n            (ERPModule.BILLING, ERPModule.FINANCE): \"invoices\"\n        }\n        return data_types.get((source, target), \"general_data\")\n\n    def _infer_frequency(self, point: IntegrationPoint) -> str:\n        \"\"\"Infer integration frequency from method\"\"\"\n        if point.method == IntegrationMethod.WEBHOOK:\n            return \"real-time\"\n        elif point.method == IntegrationMethod.API:\n            return \"hourly\"\n        elif point.method == IntegrationMethod.ETL:\n            return \"daily\"\n        elif point.method == IntegrationMethod.FILE_EXPORT:\n            return \"daily\"\n        else:\n            return \"manual\"\n\n    def _enhance_flows_from_logs(\n        self,\n        flows: List[DataFlow],\n        logs: List[Dict]\n    ) -> List[DataFlow]:\n        \"\"\"Enhance flow 
information from transaction logs\"\"\"\n        # Analyze log patterns\n        flow_stats = {}\n        for log in logs:\n            key = (log.get(\"source\"), log.get(\"target\"))\n            if key not in flow_stats:\n                flow_stats[key] = {\"count\": 0, \"errors\": 0}\n            flow_stats[key][\"count\"] += 1\n            if log.get(\"status\") == \"error\":\n                flow_stats[key][\"errors\"] += 1\n\n        # Update flows with statistics\n        for flow in flows:\n            key = (flow.source_module, flow.target_module)\n            if key in flow_stats:\n                stats = flow_stats[key]\n                error_rate = stats[\"errors\"] / stats[\"count\"] if stats[\"count\"] > 0 else 0\n                if error_rate > 0.1:\n                    flow.issues.append(f\"High error rate: {error_rate:.1%}\")\n                if stats[\"count\"] < 10:\n                    flow.issues.append(\"Low transaction volume\")\n\n        return flows\n\n    def _calculate_integration_score(\n        self,\n        erp: ERPSystem,\n        flows: List[DataFlow],\n        points: List[IntegrationPoint]\n    ) -> float:\n        \"\"\"Calculate overall integration score (0-1)\"\"\"\n        scores = []\n\n        # API availability\n        if erp.has_api:\n            scores.append(1.0)\n        else:\n            scores.append(0.3)\n\n        # Integration method quality\n        method_scores = {\n            IntegrationMethod.API: 1.0,\n            IntegrationMethod.WEBHOOK: 1.0,\n            IntegrationMethod.MESSAGE_QUEUE: 0.9,\n            IntegrationMethod.ETL: 0.8,\n            IntegrationMethod.DATABASE: 0.7,\n            IntegrationMethod.FILE_EXPORT: 0.5,\n            IntegrationMethod.MANUAL: 0.2\n        }\n\n        if points:\n            avg_method_score = sum(\n                method_scores.get(p.method, 0.5) for p in points\n            ) / len(points)\n            scores.append(avg_method_score)\n\n        # Critical flow 
coverage\n        critical_covered = sum(1 for f in flows if f.critical) / len(self.critical_flows)\n        scores.append(critical_covered)\n\n        # Flow health (issues)\n        flows_with_issues = sum(1 for f in flows if f.issues)\n        flow_health = 1 - (flows_with_issues / len(flows)) if flows else 1\n        scores.append(flow_health)\n\n        return sum(scores) / len(scores)\n\n    def _identify_bottlenecks(\n        self,\n        flows: List[DataFlow],\n        points: List[IntegrationPoint]\n    ) -> List[str]:\n        \"\"\"Identify integration bottlenecks\"\"\"\n        bottlenecks = []\n\n        # Manual integrations\n        manual_flows = [f for f in flows if f.method == IntegrationMethod.MANUAL]\n        if manual_flows:\n            bottlenecks.append(\n                f\"{len(manual_flows)} manual data flows requiring automation\"\n            )\n\n        # File-based integrations\n        file_flows = [f for f in flows if f.method == IntegrationMethod.FILE_EXPORT]\n        if file_flows:\n            bottlenecks.append(\n                f\"{len(file_flows)} file-based integrations causing delays\"\n            )\n\n        # Low reliability points\n        low_reliability = [p for p in points if p.reliability_score < 0.8]\n        if low_reliability:\n            bottlenecks.append(\n                f\"{len(low_reliability)} integration points with low reliability\"\n            )\n\n        # Flows with issues\n        problem_flows = [f for f in flows if f.issues]\n        for flow in problem_flows:\n            for issue in flow.issues:\n                bottlenecks.append(\n                    f\"{flow.source_module} → {flow.target_module}: {issue}\"\n                )\n\n        # Missing critical flows\n        existing_critical = {\n            (f.source_module, f.target_module) for f in flows if f.critical\n        }\n        for critical in self.critical_flows:\n            if critical not in existing_critical:\n               
 bottlenecks.append(\n                    f\"Missing critical flow: {critical[0]} → {critical[1]}\"\n                )\n\n        return bottlenecks\n\n    def _generate_recommendations(\n        self,\n        erp: ERPSystem,\n        flows: List[DataFlow],\n        bottlenecks: List[str]\n    ) -> List[str]:\n        \"\"\"Generate integration improvement recommendations\"\"\"\n        recommendations = []\n\n        # API recommendations\n        if not erp.has_api:\n            recommendations.append(\n                \"Enable API access for the ERP system to improve integration capabilities\"\n            )\n\n        # Method upgrades\n        manual_count = sum(1 for f in flows if f.method == IntegrationMethod.MANUAL)\n        if manual_count > 0:\n            recommendations.append(\n                f\"Automate {manual_count} manual data flows using API or ETL\"\n            )\n\n        file_count = sum(1 for f in flows if f.method == IntegrationMethod.FILE_EXPORT)\n        if file_count > 2:\n            recommendations.append(\n                \"Replace file-based integrations with real-time API connections\"\n            )\n\n        # Real-time integration\n        non_realtime = sum(\n            1 for f in flows\n            if f.critical and f.frequency not in [\"real-time\", \"hourly\"]\n        )\n        if non_realtime > 0:\n            recommendations.append(\n                f\"Upgrade {non_realtime} critical flows to real-time synchronization\"\n            )\n\n        # Data quality\n        if any(\"error rate\" in b.lower() for b in bottlenecks):\n            recommendations.append(\n                \"Implement data validation at integration points to reduce errors\"\n            )\n\n        # Monitoring\n        recommendations.append(\n            \"Implement integration monitoring dashboard for proactive issue detection\"\n        )\n\n        return recommendations\n\n    def _create_flow_diagram(\n        self,\n        erp: 
ERPSystem,\n        external_systems: List[Dict],\n        flows: List[DataFlow]\n    ) -> Dict:\n        \"\"\"Create data flow diagram structure\"\"\"\n        nodes = []\n        edges = []\n\n        # Add ERP modules as nodes\n        for module in erp.modules:\n            nodes.append({\n                \"id\": module.value,\n                \"type\": \"erp_module\",\n                \"label\": module.value.replace(\"_\", \" \").title(),\n                \"system\": erp.name\n            })\n\n        # Add external systems as nodes\n        for system in external_systems:\n            nodes.append({\n                \"id\": system[\"name\"],\n                \"type\": \"external\",\n                \"label\": system[\"name\"],\n                \"system\": \"external\"\n            })\n\n        # Add flows as edges\n        for flow in flows:\n            edges.append({\n                \"source\": flow.source_module,\n                \"target\": flow.target_module,\n                \"method\": flow.method.value,\n                \"frequency\": flow.frequency,\n                \"critical\": flow.critical,\n                \"data_type\": flow.data_type\n            })\n\n        return {\n            \"nodes\": nodes,\n            \"edges\": edges,\n            \"legend\": {\n                \"node_types\": [\"erp_module\", \"external\"],\n                \"edge_methods\": [m.value for m in IntegrationMethod]\n            }\n        }\n\n    def compare_integration_options(\n        self,\n        options: List[Dict]\n    ) -> Dict:\n        \"\"\"Compare different integration approaches\"\"\"\n        comparison = []\n\n        for option in options:\n            score = self._score_integration_option(option)\n            comparison.append({\n                \"name\": option[\"name\"],\n                \"method\": option.get(\"method\", \"unknown\"),\n                \"cost\": option.get(\"cost\", \"unknown\"),\n                \"implementation_time\": 
option.get(\"time\", \"unknown\"),\n                \"reliability\": score[\"reliability\"],\n                \"scalability\": score[\"scalability\"],\n                \"maintenance\": score[\"maintenance\"],\n                \"total_score\": score[\"total\"]\n            })\n\n        # Sort by total score\n        comparison.sort(key=lambda x: x[\"total_score\"], reverse=True)\n\n        return {\n            \"options\": comparison,\n            \"recommendation\": comparison[0][\"name\"] if comparison else None\n        }\n\n    def _score_integration_option(self, option: Dict) -> Dict:\n        \"\"\"Score an integration option\"\"\"\n        method = option.get(\"method\", \"\")\n\n        # Base scores by method\n        method_scores = {\n            \"api\": {\"reliability\": 0.9, \"scalability\": 0.9, \"maintenance\": 0.8},\n            \"etl\": {\"reliability\": 0.8, \"scalability\": 0.8, \"maintenance\": 0.7},\n            \"file\": {\"reliability\": 0.6, \"scalability\": 0.5, \"maintenance\": 0.6},\n            \"manual\": {\"reliability\": 0.4, \"scalability\": 0.2, \"maintenance\": 0.3}\n        }\n\n        scores = method_scores.get(method, {\"reliability\": 0.5, \"scalability\": 0.5, \"maintenance\": 0.5})\n        scores[\"total\"] = sum(scores.values()) / 3\n\n        return scores\n\n\nclass IntegrationHealthMonitor:\n    \"\"\"Monitor ERP integration health\"\"\"\n\n    def __init__(self, integration_points: List[IntegrationPoint]):\n        self.points = integration_points\n        self.history: List[Dict] = []\n\n    def check_health(self) -> Dict:\n        \"\"\"Check current integration health\"\"\"\n        results = {\n            \"timestamp\": datetime.now(),\n            \"overall_status\": \"healthy\",\n            \"points_checked\": len(self.points),\n            \"issues\": []\n        }\n\n        for point in self.points:\n            status = self._check_point(point)\n            if status[\"status\"] != \"healthy\":\n          
      results[\"issues\"].append({\n                    \"point\": point.id,\n                    \"status\": status[\"status\"],\n                    \"message\": status[\"message\"]\n                })\n\n        if len(results[\"issues\"]) > 0:\n            results[\"overall_status\"] = \"degraded\"\n        if len(results[\"issues\"]) > len(self.points) * 0.5:\n            results[\"overall_status\"] = \"critical\"\n\n        self.history.append(results)\n        return results\n\n    def _check_point(self, point: IntegrationPoint) -> Dict:\n        \"\"\"Check individual integration point\"\"\"\n        if point.status != \"active\":\n            return {\"status\": \"inactive\", \"message\": \"Integration point disabled\"}\n\n        if point.reliability_score < 0.5:\n            return {\"status\": \"degraded\", \"message\": \"Low reliability score\"}\n\n        if point.last_sync:\n            hours_since_sync = (datetime.now() - point.last_sync).total_seconds() / 3600\n            if hours_since_sync > 24:\n                return {\"status\": \"stale\", \"message\": f\"No sync for {hours_since_sync:.0f} hours\"}\n\n        return {\"status\": \"healthy\", \"message\": \"OK\"}\n\n    def get_health_report(self) -> str:\n        \"\"\"Generate health report\"\"\"\n        current = self.check_health()\n\n        report = f\"\"\"\n# ERP Integration Health Report\nGenerated: {current['timestamp'].strftime('%Y-%m-%d %H:%M')}\n\n## Overall Status: {current['overall_status'].upper()}\n\n### Integration Points: {current['points_checked']}\n### Active Issues: {len(current['issues'])}\n\"\"\"\n        if current['issues']:\n            report += \"\\n### Issues:\\n\"\n            for issue in current['issues']:\n                report += f\"- **{issue['point']}**: {issue['status']} - {issue['message']}\\n\"\n\n        return report"
      },
      {
        "title": "Analyze ERP Integration",
        "body": "analyzer = ERPIntegrationAnalyzer()\n\n# Define ERP system\nerp = ERPSystem(\n    name=\"SAP S/4HANA\",\n    vendor=\"SAP\",\n    version=\"2023\",\n    modules=[\n        ERPModule.FINANCE,\n        ERPModule.PROJECT_MANAGEMENT,\n        ERPModule.PROCUREMENT,\n        ERPModule.COST_CONTROL,\n        ERPModule.HR,\n        ERPModule.BILLING\n    ],\n    database=\"HANA\",\n    has_api=True,\n    api_type=\"REST\"\n)\n\n# Define external systems\nexternal = [\n    {\"name\": \"Procore\", \"type\": \"project_management\"},\n    {\"name\": \"Revit\", \"type\": \"bim\"},\n    {\"name\": \"Primavera\", \"type\": \"scheduling\"}\n]\n\n# Define integration points\npoints = [\n    IntegrationPoint(\n        id=\"erp-procore\",\n        source_system=\"SAP S/4HANA\",\n        target_system=\"Procore\",\n        method=IntegrationMethod.API\n    ),\n    IntegrationPoint(\n        id=\"erp-primavera\",\n        source_system=\"SAP S/4HANA\",\n        target_system=\"Primavera\",\n        method=IntegrationMethod.FILE_EXPORT\n    )\n]\n\nanalysis = analyzer.analyze_erp_integration(\n    erp_system=erp,\n    external_systems=external,\n    integration_points=points\n)\n\nprint(f\"Integration Score: {analysis.integration_score:.0%}\")\nprint(f\"Bottlenecks: {len(analysis.bottlenecks)}\")"
      },
      {
        "title": "Monitor Integration Health",
        "body": "monitor = IntegrationHealthMonitor(integration_points)\n\nhealth = monitor.check_health()\nprint(f\"Status: {health['overall_status']}\")\n\nif health['issues']:\n    for issue in health['issues']:\n        print(f\"  - {issue['point']}: {issue['message']}\")\n\n# Generate report\nreport = monitor.get_health_report()\nprint(report)"
      },
      {
        "title": "Compare Integration Options",
        "body": "options = [\n    {\"name\": \"REST API Integration\", \"method\": \"api\", \"cost\": 50000, \"time\": \"3 months\"},\n    {\"name\": \"ETL Pipeline\", \"method\": \"etl\", \"cost\": 30000, \"time\": \"2 months\"},\n    {\"name\": \"File-based Export\", \"method\": \"file\", \"cost\": 10000, \"time\": \"1 month\"}\n]\n\ncomparison = analyzer.compare_integration_options(options)\nprint(f\"Recommended: {comparison['recommendation']}\")"
      },
      {
        "title": "Quick Reference",
        "body": "| Component | Purpose |\n| --- | --- |\n| ERPIntegrationAnalyzer | Main analysis engine |\n| ERPSystem | ERP system definition |\n| ERPModule | Standard ERP modules |\n| IntegrationPoint | Integration connection |\n| DataFlow | Data flow mapping |\n| IntegrationHealthMonitor | Health monitoring |"
      },
      {
        "title": "Resources",
        "body": "Book: \"Data-Driven Construction\" by Artem Boiko, Chapter 1.2\nWebsite: https://datadrivenconstruction.io"
      },
      {
        "title": "Next Steps",
        "body": "Use data-silo-detection to identify isolated systems\nUse etl-pipeline for data integration\nUse interoperability-analyzer for standards compliance"
      }
    ],
    "body": "ERP Integration Analysis\nOverview\n\nBased on DDC methodology (Chapter 1.2), this skill analyzes ERP system integration patterns in construction organizations, mapping data flows between modules and identifying optimization opportunities.\n\nBook Reference: \"Технологии и системы управления в современном строительстве\" / \"Technologies and Management Systems in Modern Construction\"\n\nQuick Start\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import List, Dict, Optional, Set, Tuple\nfrom datetime import datetime\nimport json\n\nclass ERPModule(Enum):\n    \"\"\"Common ERP modules in construction\"\"\"\n    FINANCE = \"finance\"\n    PROJECT_MANAGEMENT = \"project_management\"\n    PROCUREMENT = \"procurement\"\n    INVENTORY = \"inventory\"\n    HR = \"human_resources\"\n    PAYROLL = \"payroll\"\n    EQUIPMENT = \"equipment\"\n    SUBCONTRACTS = \"subcontracts\"\n    BILLING = \"billing\"\n    COST_CONTROL = \"cost_control\"\n    DOCUMENT_MANAGEMENT = \"document_management\"\n    REPORTING = \"reporting\"\n\nclass IntegrationMethod(Enum):\n    \"\"\"Types of integration methods\"\"\"\n    API = \"api\"\n    DATABASE = \"database\"\n    FILE_EXPORT = \"file_export\"\n    MANUAL = \"manual\"\n    WEBHOOK = \"webhook\"\n    MESSAGE_QUEUE = \"message_queue\"\n    ETL = \"etl\"\n\nclass DataFlowDirection(Enum):\n    \"\"\"Direction of data flow\"\"\"\n    INBOUND = \"inbound\"\n    OUTBOUND = \"outbound\"\n    BIDIRECTIONAL = \"bidirectional\"\n\n@dataclass\nclass DataFlow:\n    \"\"\"Represents a data flow between systems/modules\"\"\"\n    source_module: str\n    target_module: str\n    data_type: str\n    frequency: str  # real-time, hourly, daily, weekly, manual\n    method: IntegrationMethod\n    direction: DataFlowDirection\n    volume: str  # low, medium, high\n    critical: bool = False\n    issues: List[str] = field(default_factory=list)\n\n@dataclass\nclass ERPSystem:\n    \"\"\"ERP system definition\"\"\"\n    
name: str\n    vendor: str\n    version: str\n    modules: List[ERPModule]\n    database: str\n    has_api: bool\n    api_type: Optional[str] = None  # REST, SOAP, GraphQL\n    custom_modules: List[str] = field(default_factory=list)\n\n@dataclass\nclass IntegrationPoint:\n    \"\"\"Integration point between systems\"\"\"\n    id: str\n    source_system: str\n    target_system: str\n    method: IntegrationMethod\n    endpoint: Optional[str] = None\n    authentication: Optional[str] = None\n    data_format: str = \"json\"\n    status: str = \"active\"\n    reliability_score: float = 1.0\n    last_sync: Optional[datetime] = None\n\n@dataclass\nclass IntegrationAnalysis:\n    \"\"\"Complete integration analysis results\"\"\"\n    erp_system: ERPSystem\n    external_systems: List[str]\n    data_flows: List[DataFlow]\n    integration_points: List[IntegrationPoint]\n    integration_score: float\n    bottlenecks: List[str]\n    recommendations: List[str]\n    data_flow_diagram: Dict\n\n\nclass ERPIntegrationAnalyzer:\n    \"\"\"\n    Analyze ERP system integration for construction data flows.\n    Based on DDC methodology Chapter 1.2.\n    \"\"\"\n\n    def __init__(self):\n        self.module_dependencies = self._define_module_dependencies()\n        self.critical_flows = self._define_critical_flows()\n\n    def _define_module_dependencies(self) -> Dict[ERPModule, List[ERPModule]]:\n        \"\"\"Define typical module dependencies\"\"\"\n        return {\n            ERPModule.PROJECT_MANAGEMENT: [\n                ERPModule.COST_CONTROL,\n                ERPModule.PROCUREMENT,\n                ERPModule.HR,\n                ERPModule.DOCUMENT_MANAGEMENT\n            ],\n            ERPModule.COST_CONTROL: [\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.BILLING\n            ],\n            ERPModule.PROCUREMENT: [\n                ERPModule.INVENTORY,\n                ERPModule.FINANCE,\n                
ERPModule.SUBCONTRACTS\n            ],\n            ERPModule.BILLING: [\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.COST_CONTROL\n            ],\n            ERPModule.PAYROLL: [\n                ERPModule.HR,\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT\n            ],\n            ERPModule.INVENTORY: [\n                ERPModule.PROCUREMENT,\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.FINANCE\n            ],\n            ERPModule.EQUIPMENT: [\n                ERPModule.PROJECT_MANAGEMENT,\n                ERPModule.FINANCE,\n                ERPModule.INVENTORY\n            ],\n            ERPModule.SUBCONTRACTS: [\n                ERPModule.PROCUREMENT,\n                ERPModule.FINANCE,\n                ERPModule.PROJECT_MANAGEMENT\n            ]\n        }\n\n    def _define_critical_flows(self) -> List[Tuple[str, str]]:\n        \"\"\"Define business-critical data flows\"\"\"\n        return [\n            (\"project_management\", \"cost_control\"),\n            (\"cost_control\", \"finance\"),\n            (\"procurement\", \"inventory\"),\n            (\"billing\", \"finance\"),\n            (\"hr\", \"payroll\"),\n            (\"project_management\", \"billing\")\n        ]\n\n    def analyze_erp_integration(\n        self,\n        erp_system: ERPSystem,\n        external_systems: List[Dict],\n        integration_points: List[IntegrationPoint],\n        transaction_logs: Optional[List[Dict]] = None\n    ) -> IntegrationAnalysis:\n        \"\"\"\n        Perform comprehensive ERP integration analysis.\n\n        Args:\n            erp_system: The ERP system to analyze\n            external_systems: List of external systems\n            integration_points: Defined integration points\n            transaction_logs: Optional transaction logs for analysis\n\n        Returns:\n            Complete integration analysis\n        
\"\"\"\n        # Map all data flows\n        data_flows = self._map_data_flows(\n            erp_system, integration_points, transaction_logs\n        )\n\n        # Calculate integration score\n        integration_score = self._calculate_integration_score(\n            erp_system, data_flows, integration_points\n        )\n\n        # Identify bottlenecks\n        bottlenecks = self._identify_bottlenecks(\n            data_flows, integration_points\n        )\n\n        # Generate recommendations\n        recommendations = self._generate_recommendations(\n            erp_system, data_flows, bottlenecks\n        )\n\n        # Create data flow diagram\n        diagram = self._create_flow_diagram(\n            erp_system, external_systems, data_flows\n        )\n\n        return IntegrationAnalysis(\n            erp_system=erp_system,\n            external_systems=[s[\"name\"] for s in external_systems],\n            data_flows=data_flows,\n            integration_points=integration_points,\n            integration_score=integration_score,\n            bottlenecks=bottlenecks,\n            recommendations=recommendations,\n            data_flow_diagram=diagram\n        )\n\n    def _map_data_flows(\n        self,\n        erp: ERPSystem,\n        integration_points: List[IntegrationPoint],\n        logs: Optional[List[Dict]]\n    ) -> List[DataFlow]:\n        \"\"\"Map all data flows in the system\"\"\"\n        flows = []\n\n        # Internal module flows\n        for module in erp.modules:\n            dependencies = self.module_dependencies.get(module, [])\n            for dep in dependencies:\n                if dep in erp.modules:\n                    is_critical = (module.value, dep.value) in self.critical_flows\n                    flows.append(DataFlow(\n                        source_module=module.value,\n                        target_module=dep.value,\n                        data_type=self._get_data_type(module, dep),\n                        
frequency=\"real-time\",\n                        method=IntegrationMethod.DATABASE,\n                        direction=DataFlowDirection.BIDIRECTIONAL,\n                        volume=\"high\" if is_critical else \"medium\",\n                        critical=is_critical\n                    ))\n\n        # External integration flows\n        for point in integration_points:\n            if point.source_system == erp.name or point.target_system == erp.name:\n                flows.append(DataFlow(\n                    source_module=point.source_system,\n                    target_module=point.target_system,\n                    data_type=\"mixed\",\n                    frequency=self._infer_frequency(point),\n                    method=point.method,\n                    direction=DataFlowDirection.BIDIRECTIONAL,\n                    volume=\"medium\",\n                    critical=False\n                ))\n\n        # Analyze logs if available\n        if logs:\n            flows = self._enhance_flows_from_logs(flows, logs)\n\n        return flows\n\n    def _get_data_type(\n        self, source: ERPModule, target: ERPModule\n    ) -> str:\n        \"\"\"Determine data type for module pair\"\"\"\n        data_types = {\n            (ERPModule.PROJECT_MANAGEMENT, ERPModule.COST_CONTROL): \"costs_budgets\",\n            (ERPModule.COST_CONTROL, ERPModule.FINANCE): \"financial_transactions\",\n            (ERPModule.PROCUREMENT, ERPModule.INVENTORY): \"purchase_orders\",\n            (ERPModule.HR, ERPModule.PAYROLL): \"employee_time\",\n            (ERPModule.BILLING, ERPModule.FINANCE): \"invoices\"\n        }\n        return data_types.get((source, target), \"general_data\")\n\n    def _infer_frequency(self, point: IntegrationPoint) -> str:\n        \"\"\"Infer integration frequency from method\"\"\"\n        if point.method == IntegrationMethod.WEBHOOK:\n            return \"real-time\"\n        elif point.method == IntegrationMethod.API:\n            return 
\"hourly\"\n        elif point.method == IntegrationMethod.ETL:\n            return \"daily\"\n        elif point.method == IntegrationMethod.FILE_EXPORT:\n            return \"daily\"\n        else:\n            return \"manual\"\n\n    def _enhance_flows_from_logs(\n        self,\n        flows: List[DataFlow],\n        logs: List[Dict]\n    ) -> List[DataFlow]:\n        \"\"\"Enhance flow information from transaction logs\"\"\"\n        # Analyze log patterns\n        flow_stats = {}\n        for log in logs:\n            key = (log.get(\"source\"), log.get(\"target\"))\n            if key not in flow_stats:\n                flow_stats[key] = {\"count\": 0, \"errors\": 0}\n            flow_stats[key][\"count\"] += 1\n            if log.get(\"status\") == \"error\":\n                flow_stats[key][\"errors\"] += 1\n\n        # Update flows with statistics\n        for flow in flows:\n            key = (flow.source_module, flow.target_module)\n            if key in flow_stats:\n                stats = flow_stats[key]\n                error_rate = stats[\"errors\"] / stats[\"count\"] if stats[\"count\"] > 0 else 0\n                if error_rate > 0.1:\n                    flow.issues.append(f\"High error rate: {error_rate:.1%}\")\n                if stats[\"count\"] < 10:\n                    flow.issues.append(\"Low transaction volume\")\n\n        return flows\n\n    def _calculate_integration_score(\n        self,\n        erp: ERPSystem,\n        flows: List[DataFlow],\n        points: List[IntegrationPoint]\n    ) -> float:\n        \"\"\"Calculate overall integration score (0-1)\"\"\"\n        scores = []\n\n        # API availability\n        if erp.has_api:\n            scores.append(1.0)\n        else:\n            scores.append(0.3)\n\n        # Integration method quality\n        method_scores = {\n            IntegrationMethod.API: 1.0,\n            IntegrationMethod.WEBHOOK: 1.0,\n            IntegrationMethod.MESSAGE_QUEUE: 0.9,\n            
IntegrationMethod.ETL: 0.8,\n            IntegrationMethod.DATABASE: 0.7,\n            IntegrationMethod.FILE_EXPORT: 0.5,\n            IntegrationMethod.MANUAL: 0.2\n        }\n\n        if points:\n            avg_method_score = sum(\n                method_scores.get(p.method, 0.5) for p in points\n            ) / len(points)\n            scores.append(avg_method_score)\n\n        # Critical flow coverage\n        critical_covered = sum(1 for f in flows if f.critical) / len(self.critical_flows)\n        scores.append(critical_covered)\n\n        # Flow health (issues)\n        flows_with_issues = sum(1 for f in flows if f.issues)\n        flow_health = 1 - (flows_with_issues / len(flows)) if flows else 1\n        scores.append(flow_health)\n\n        return sum(scores) / len(scores)\n\n    def _identify_bottlenecks(\n        self,\n        flows: List[DataFlow],\n        points: List[IntegrationPoint]\n    ) -> List[str]:\n        \"\"\"Identify integration bottlenecks\"\"\"\n        bottlenecks = []\n\n        # Manual integrations\n        manual_flows = [f for f in flows if f.method == IntegrationMethod.MANUAL]\n        if manual_flows:\n            bottlenecks.append(\n                f\"{len(manual_flows)} manual data flows requiring automation\"\n            )\n\n        # File-based integrations\n        file_flows = [f for f in flows if f.method == IntegrationMethod.FILE_EXPORT]\n        if file_flows:\n            bottlenecks.append(\n                f\"{len(file_flows)} file-based integrations causing delays\"\n            )\n\n        # Low reliability points\n        low_reliability = [p for p in points if p.reliability_score < 0.8]\n        if low_reliability:\n            bottlenecks.append(\n                f\"{len(low_reliability)} integration points with low reliability\"\n            )\n\n        # Flows with issues\n        problem_flows = [f for f in flows if f.issues]\n        for flow in problem_flows:\n            for issue in 
flow.issues:\n                bottlenecks.append(\n                    f\"{flow.source_module} → {flow.target_module}: {issue}\"\n                )\n\n        # Missing critical flows\n        existing_critical = {\n            (f.source_module, f.target_module) for f in flows if f.critical\n        }\n        for critical in self.critical_flows:\n            if critical not in existing_critical:\n                bottlenecks.append(\n                    f\"Missing critical flow: {critical[0]} → {critical[1]}\"\n                )\n\n        return bottlenecks\n\n    def _generate_recommendations(\n        self,\n        erp: ERPSystem,\n        flows: List[DataFlow],\n        bottlenecks: List[str]\n    ) -> List[str]:\n        \"\"\"Generate integration improvement recommendations\"\"\"\n        recommendations = []\n\n        # API recommendations\n        if not erp.has_api:\n            recommendations.append(\n                \"Enable API access for the ERP system to improve integration capabilities\"\n            )\n\n        # Method upgrades\n        manual_count = sum(1 for f in flows if f.method == IntegrationMethod.MANUAL)\n        if manual_count > 0:\n            recommendations.append(\n                f\"Automate {manual_count} manual data flows using API or ETL\"\n            )\n\n        file_count = sum(1 for f in flows if f.method == IntegrationMethod.FILE_EXPORT)\n        if file_count > 2:\n            recommendations.append(\n                \"Replace file-based integrations with real-time API connections\"\n            )\n\n        # Real-time integration\n        non_realtime = sum(\n            1 for f in flows\n            if f.critical and f.frequency not in [\"real-time\", \"hourly\"]\n        )\n        if non_realtime > 0:\n            recommendations.append(\n                f\"Upgrade {non_realtime} critical flows to real-time synchronization\"\n            )\n\n        # Data quality\n        if any(\"error rate\" in b.lower() for b 
in bottlenecks):\n            recommendations.append(\n                \"Implement data validation at integration points to reduce errors\"\n            )\n\n        # Monitoring\n        recommendations.append(\n            \"Implement integration monitoring dashboard for proactive issue detection\"\n        )\n\n        return recommendations\n\n    def _create_flow_diagram(\n        self,\n        erp: ERPSystem,\n        external_systems: List[Dict],\n        flows: List[DataFlow]\n    ) -> Dict:\n        \"\"\"Create data flow diagram structure\"\"\"\n        nodes = []\n        edges = []\n\n        # Add ERP modules as nodes\n        for module in erp.modules:\n            nodes.append({\n                \"id\": module.value,\n                \"type\": \"erp_module\",\n                \"label\": module.value.replace(\"_\", \" \").title(),\n                \"system\": erp.name\n            })\n\n        # Add external systems as nodes\n        for system in external_systems:\n            nodes.append({\n                \"id\": system[\"name\"],\n                \"type\": \"external\",\n                \"label\": system[\"name\"],\n                \"system\": \"external\"\n            })\n\n        # Add flows as edges\n        for flow in flows:\n            edges.append({\n                \"source\": flow.source_module,\n                \"target\": flow.target_module,\n                \"method\": flow.method.value,\n                \"frequency\": flow.frequency,\n                \"critical\": flow.critical,\n                \"data_type\": flow.data_type\n            })\n\n        return {\n            \"nodes\": nodes,\n            \"edges\": edges,\n            \"legend\": {\n                \"node_types\": [\"erp_module\", \"external\"],\n                \"edge_methods\": [m.value for m in IntegrationMethod]\n            }\n        }\n\n    def compare_integration_options(\n        self,\n        options: List[Dict]\n    ) -> Dict:\n        \"\"\"Compare 
different integration approaches\"\"\"\n        comparison = []\n\n        for option in options:\n            score = self._score_integration_option(option)\n            comparison.append({\n                \"name\": option[\"name\"],\n                \"method\": option.get(\"method\", \"unknown\"),\n                \"cost\": option.get(\"cost\", \"unknown\"),\n                \"implementation_time\": option.get(\"time\", \"unknown\"),\n                \"reliability\": score[\"reliability\"],\n                \"scalability\": score[\"scalability\"],\n                \"maintenance\": score[\"maintenance\"],\n                \"total_score\": score[\"total\"]\n            })\n\n        # Sort by total score\n        comparison.sort(key=lambda x: x[\"total_score\"], reverse=True)\n\n        return {\n            \"options\": comparison,\n            \"recommendation\": comparison[0][\"name\"] if comparison else None\n        }\n\n    def _score_integration_option(self, option: Dict) -> Dict:\n        \"\"\"Score an integration option\"\"\"\n        method = option.get(\"method\", \"\")\n\n        # Base scores by method\n        method_scores = {\n            \"api\": {\"reliability\": 0.9, \"scalability\": 0.9, \"maintenance\": 0.8},\n            \"etl\": {\"reliability\": 0.8, \"scalability\": 0.8, \"maintenance\": 0.7},\n            \"file\": {\"reliability\": 0.6, \"scalability\": 0.5, \"maintenance\": 0.6},\n            \"manual\": {\"reliability\": 0.4, \"scalability\": 0.2, \"maintenance\": 0.3}\n        }\n\n        scores = method_scores.get(method, {\"reliability\": 0.5, \"scalability\": 0.5, \"maintenance\": 0.5})\n        scores[\"total\"] = sum(scores.values()) / 3\n\n        return scores\n\n\nclass IntegrationHealthMonitor:\n    \"\"\"Monitor ERP integration health\"\"\"\n\n    def __init__(self, integration_points: List[IntegrationPoint]):\n        self.points = integration_points\n        self.history: List[Dict] = []\n\n    def check_health(self) -> 
Dict:\n        \"\"\"Check current integration health\"\"\"\n        results = {\n            \"timestamp\": datetime.now(),\n            \"overall_status\": \"healthy\",\n            \"points_checked\": len(self.points),\n            \"issues\": []\n        }\n\n        for point in self.points:\n            status = self._check_point(point)\n            if status[\"status\"] != \"healthy\":\n                results[\"issues\"].append({\n                    \"point\": point.id,\n                    \"status\": status[\"status\"],\n                    \"message\": status[\"message\"]\n                })\n\n        if len(results[\"issues\"]) > 0:\n            results[\"overall_status\"] = \"degraded\"\n        if len(results[\"issues\"]) > len(self.points) * 0.5:\n            results[\"overall_status\"] = \"critical\"\n\n        self.history.append(results)\n        return results\n\n    def _check_point(self, point: IntegrationPoint) -> Dict:\n        \"\"\"Check individual integration point\"\"\"\n        if point.status != \"active\":\n            return {\"status\": \"inactive\", \"message\": \"Integration point disabled\"}\n\n        if point.reliability_score < 0.5:\n            return {\"status\": \"degraded\", \"message\": \"Low reliability score\"}\n\n        if point.last_sync:\n            hours_since_sync = (datetime.now() - point.last_sync).total_seconds() / 3600\n            if hours_since_sync > 24:\n                return {\"status\": \"stale\", \"message\": f\"No sync for {hours_since_sync:.0f} hours\"}\n\n        return {\"status\": \"healthy\", \"message\": \"OK\"}\n\n    def get_health_report(self) -> str:\n        \"\"\"Generate health report\"\"\"\n        current = self.check_health()\n\n        report = f\"\"\"\n# ERP Integration Health Report\nGenerated: {current['timestamp'].strftime('%Y-%m-%d %H:%M')}\n\n## Overall Status: {current['overall_status'].upper()}\n\n### Integration Points: {current['points_checked']}\n### Active Issues: 
{len(current['issues'])}\n\"\"\"\n        if current['issues']:\n            report += \"\\n### Issues:\\n\"\n            for issue in current['issues']:\n                report += f\"- **{issue['point']}**: {issue['status']} - {issue['message']}\\n\"\n\n        return report\n\nCommon Use Cases\nAnalyze ERP Integration\nanalyzer = ERPIntegrationAnalyzer()\n\n# Define ERP system\nerp = ERPSystem(\n    name=\"SAP S/4HANA\",\n    vendor=\"SAP\",\n    version=\"2023\",\n    modules=[\n        ERPModule.FINANCE,\n        ERPModule.PROJECT_MANAGEMENT,\n        ERPModule.PROCUREMENT,\n        ERPModule.COST_CONTROL,\n        ERPModule.HR,\n        ERPModule.BILLING\n    ],\n    database=\"HANA\",\n    has_api=True,\n    api_type=\"REST\"\n)\n\n# Define external systems\nexternal = [\n    {\"name\": \"Procore\", \"type\": \"project_management\"},\n    {\"name\": \"Revit\", \"type\": \"bim\"},\n    {\"name\": \"Primavera\", \"type\": \"scheduling\"}\n]\n\n# Define integration points\npoints = [\n    IntegrationPoint(\n        id=\"erp-procore\",\n        source_system=\"SAP S/4HANA\",\n        target_system=\"Procore\",\n        method=IntegrationMethod.API\n    ),\n    IntegrationPoint(\n        id=\"erp-primavera\",\n        source_system=\"SAP S/4HANA\",\n        target_system=\"Primavera\",\n        method=IntegrationMethod.FILE_EXPORT\n    )\n]\n\nanalysis = analyzer.analyze_erp_integration(\n    erp_system=erp,\n    external_systems=external,\n    integration_points=points\n)\n\nprint(f\"Integration Score: {analysis.integration_score:.0%}\")\nprint(f\"Bottlenecks: {len(analysis.bottlenecks)}\")\n\nMonitor Integration Health\nmonitor = IntegrationHealthMonitor(integration_points)\n\nhealth = monitor.check_health()\nprint(f\"Status: {health['overall_status']}\")\n\nif health['issues']:\n    for issue in health['issues']:\n        print(f\"  - {issue['point']}: {issue['message']}\")\n\n# Generate report\nreport = monitor.get_health_report()\nprint(report)\n\nCompare 
Integration Options\noptions = [\n    {\"name\": \"REST API Integration\", \"method\": \"api\", \"cost\": 50000, \"time\": \"3 months\"},\n    {\"name\": \"ETL Pipeline\", \"method\": \"etl\", \"cost\": 30000, \"time\": \"2 months\"},\n    {\"name\": \"File-based Export\", \"method\": \"file\", \"cost\": 10000, \"time\": \"1 month\"}\n]\n\ncomparison = analyzer.compare_integration_options(options)\nprint(f\"Recommended: {comparison['recommendation']}\")\n\nQuick Reference\nComponent\tPurpose\nERPIntegrationAnalyzer\tMain analysis engine\nERPSystem\tERP system definition\nERPModule\tStandard ERP modules\nIntegrationPoint\tIntegration connection\nDataFlow\tData flow mapping\nIntegrationHealthMonitor\tHealth monitoring\nResources\nBook: \"Data-Driven Construction\" by Artem Boiko, Chapter 1.2\nWebsite: https://datadrivenconstruction.io\nNext Steps\nUse data-silo-detection to identify isolated systems\nUse etl-pipeline for data integration\nUse interoperability-analyzer for standards compliance"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/datadrivenconstruction/erp-integration-analysis",
    "publisherUrl": "https://clawhub.ai/datadrivenconstruction/erp-integration-analysis",
    "owner": "datadrivenconstruction",
    "version": "2.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/erp-integration-analysis",
    "downloadUrl": "https://openagent3.xyz/downloads/erp-integration-analysis",
    "agentUrl": "https://openagent3.xyz/skills/erp-integration-analysis/agent",
    "manifestUrl": "https://openagent3.xyz/skills/erp-integration-analysis/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/erp-integration-analysis/agent.md"
  }
}