{
  "schemaVersion": "1.0",
  "item": {
    "slug": "afrexai-n8n-mastery",
    "name": "n8n Workflow Engineering",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/1kalin/afrexai-n8n-mastery",
    "canonicalUrl": "https://clawhub.ai/1kalin/afrexai-n8n-mastery",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/afrexai-n8n-mastery",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-n8n-mastery",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-07T17:22:31.273Z",
      "expiresAt": "2026-05-14T17:22:31.273Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-annual-report",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-annual-report",
        "contentDisposition": "attachment; filename=\"afrexai-annual-report-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/afrexai-n8n-mastery"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/afrexai-n8n-mastery",
    "agentPageUrl": "https://openagent3.xyz/skills/afrexai-n8n-mastery/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-n8n-mastery/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-n8n-mastery/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "n8n Workflow Mastery — Complete Automation Engineering System",
        "body": "You are an expert n8n workflow architect. You design, build, debug, optimize, and scale n8n automations following production-grade methodology. Every workflow you create is complete, functional, and follows the patterns in this guide."
      },
      {
        "title": "Phase 1: Quick Health Check (Run First)",
        "body": "Score the current n8n setup (1 point each, /10):\n\nSignalCheckWorkflow namingConsistent [Category] Description format?Error handlingEvery workflow has error trigger node?CredentialsUsing n8n credential store (not hardcoded)?VersioningWorkflow descriptions include version/changelog?MonitoringError workflow connected to notification channel?Retry logicHTTP nodes have retry on failure enabled?Execution dataPruning configured (not filling disk)?Sub-workflowsComplex logic broken into reusable sub-workflows?Environment varsUsing env vars for URLs/configs (not magic strings)?DocumentationEach workflow has description explaining purpose?\n\nScore 0-3: Critical — follow this guide start to finish.\nScore 4-6: Gaps — focus on missing areas.\nScore 7-10: Mature — jump to advanced patterns."
      },
      {
        "title": "2.1 Workflow Strategy Brief",
        "body": "Before building, answer these in a YAML brief:\n\nworkflow_brief:\n  name: \"[Category] Brief Description\"\n  problem: \"What manual process does this eliminate?\"\n  trigger: \"What starts this workflow? (webhook/schedule/event/manual)\"\n  inputs:\n    - source: \"Where does data come from?\"\n      format: \"JSON/CSV/form/email/database\"\n      volume: \"How many items per run? Per day?\"\n  outputs:\n    - destination: \"Where does data go?\"\n      format: \"API call/email/database/file/notification\"\n  error_handling: \"What happens when it fails?\"\n  sla: \"How fast must it complete? Acceptable delay?\"\n  dependencies:\n    - service: \"External API/service name\"\n      auth_type: \"API key/OAuth2/Basic\"\n      rate_limit: \"Calls per minute/hour\"\n  owner: \"Who maintains this workflow?\"\n  review_date: \"When to review/optimize?\""
      },
      {
        "title": "2.2 Workflow Naming Convention",
        "body": "[CATEGORY] Action — Target (vX.Y)\n\nCategories:\n  [SYNC]     — Data synchronization between systems\n  [PROCESS]  — Multi-step business processes\n  [NOTIFY]   — Alerts and notifications\n  [INGEST]   — Data collection and import\n  [EXPORT]   — Reports and data export\n  [MONITOR]  — Health checks and monitoring\n  [AI]       — LLM/AI-powered workflows\n  [INTERNAL] — Internal tooling and utilities\n\nExamples:\n  [SYNC] HubSpot → Postgres — Contacts (v2.1)\n  [PROCESS] Invoice Approval — Slack + QuickBooks (v1.3)\n  [NOTIFY] Stripe Payment — Team Alert (v1.0)\n  [AI] Support Ticket — Auto-classify + Route (v1.2)"
      },
      {
        "title": "2.3 Workflow Complexity Tiers",
        "body": "TierNodesDescriptionApproachSimple3-7Linear A→B→CSingle workflowStandard8-15Branches, loops, some error handlingSingle workflow + error triggerComplex16-30Multi-service, conditional logic, retriesMain + sub-workflowsEnterprise30+Orchestration, queues, state managementOrchestrator + multiple sub-workflows\n\nRule: If a workflow exceeds 30 nodes, decompose into sub-workflows."
      },
      {
        "title": "2.4 Node Organization Layout",
        "body": "Left → Right flow (primary path)\nTop → Bottom (branches and error paths)\n\nSection 1 (x: 0-600):     Trigger + Input Processing\nSection 2 (x: 600-1200):  Core Logic + Transformations\nSection 3 (x: 1200-1800): Output + Delivery\nSection 4 (x: 1800+):     Error Handling + Logging\n\nUse Sticky Notes for section labels (yellow = info, red = warning, green = success path)"
      },
      {
        "title": "3.1 Trigger Selection Matrix",
        "body": "Use CaseTrigger TypeNodeWhen to UseExternal system sends dataWebhookWebhookAPI integrations, form submissionsRun at specific timesScheduleSchedule TriggerReports, syncs, cleanupReact to n8n eventsError/WorkflowError TriggerError handling, workflow chainingManual testing/ad-hocManualManual TriggerDevelopment, one-off runsChat/conversationalChatChat TriggerAI assistants, chatbotsFile changesPollingVariousGoogle Drive, S3, FTP monitoringEmail arrivesPollingIMAP EmailEmail processing workflowsDatabase changePolling/WebhookVariousCDC (Change Data Capture)"
      },
      {
        "title": "3.2 Webhook Security Checklist",
        "body": "webhook_security:\n  authentication:\n    - method: \"Header Auth\"\n      setup: \"Add Header Auth credential, verify X-API-Key\"\n      use_when: \"Service-to-service, simple integrations\"\n    - method: \"HMAC Signature\"  \n      setup: \"Code node to verify HMAC-SHA256 of body\"\n      use_when: \"Stripe, GitHub, Shopify webhooks\"\n    - method: \"JWT Bearer\"\n      setup: \"Code node to verify JWT token\"\n      use_when: \"OAuth2 services, custom apps\"\n    - method: \"IP Allowlist\"\n      setup: \"IF node checking $request.headers['x-forwarded-for']\"\n      use_when: \"Known source IPs (internal services)\"\n  \n  validation:\n    - \"Always validate incoming payload schema with IF/Switch\"\n    - \"Return appropriate HTTP status (200 OK, 400 Bad Request)\"\n    - \"Log all webhook calls for audit trail\"\n    - \"Set webhook timeout (don't leave connections hanging)\"\n    - \"Use 'Respond to Webhook' node for async processing\""
      },
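      {
        "title": "3.2a Sketch: JWT Bearer Verification (Illustrative)",
        "body": "The checklist above lists a Code node that verifies a JWT Bearer token but doesn't show one. A minimal illustrative sketch follows. It assumes the jsonwebtoken package is allowed via NODE_FUNCTION_ALLOW_EXTERNAL and that the Webhook node exposes request headers under $json.headers; both are deployment-specific, so adapt to your instance.\n\n// Illustrative sketch, not from the package docs.\n// Assumes: NODE_FUNCTION_ALLOW_EXTERNAL=jsonwebtoken, and the\n// Webhook node outputs request headers under $json.headers.\nconst jwt = require('jsonwebtoken');\n\nconst auth = $json.headers?.authorization || '';\nconst token = auth.replace(/^Bearer\\s+/i, '');\n\ntry {\n  // Throws if the signature or expiry is invalid\n  const claims = jwt.verify(token, $env.JWT_SECRET);\n  return [{ json: { ...$json.body, _claims: claims } }];\n} catch (e) {\n  // Route to a Respond to Webhook node that returns 401\n  return [{ json: { error: 'Invalid token', _reject: true } }];\n}"
      },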
      {
        "title": "3.3 Schedule Trigger Patterns",
        "body": "schedule_patterns:\n  business_hours_check:\n    cron: \"*/15 9-17 * * 1-5\"\n    description: \"Every 15 min during business hours (Mon-Fri)\"\n    \n  daily_morning_report:\n    cron: \"0 8 * * 1-5\"\n    description: \"8 AM weekdays\"\n    \n  weekly_cleanup:\n    cron: \"0 2 * * 0\"\n    description: \"2 AM Sunday (low traffic)\"\n    \n  monthly_billing:\n    cron: \"0 6 1 * *\"\n    description: \"1st of month, 6 AM\"\n    \n  smart_polling:\n    cron: \"*/5 * * * *\"\n    description: \"Every 5 min — use with dedup to avoid reprocessing\"\n    dedup_strategy: \"Store last processed ID/timestamp in n8n static data\""
      },
      {
        "title": "4.1 HTTP Request — Production Pattern",
        "body": "{\n  \"node\": \"HTTP Request\",\n  \"settings\": {\n    \"method\": \"POST\",\n    \"url\": \"={{ $env.API_BASE_URL }}/endpoint\",\n    \"authentication\": \"predefinedCredentialType\",\n    \"sendHeaders\": true,\n    \"headerParameters\": {\n      \"Content-Type\": \"application/json\",\n      \"User-Agent\": \"n8n-automation/1.0\"\n    },\n    \"sendBody\": true,\n    \"bodyParameters\": \"={{ JSON.stringify($json) }}\",\n    \"options\": {\n      \"timeout\": 30000,\n      \"retry\": {\n        \"maxRetries\": 3,\n        \"retryInterval\": 1000,\n        \"retryOnTimeout\": true\n      },\n      \"response\": {\n        \"response\": {\n          \"fullResponse\": true\n        }\n      }\n    }\n  }\n}\n\nHTTP Request Rules:\n\nAlways set timeout (default 300s is too long for most APIs)\nEnable retry with exponential backoff for external APIs\nUse credential store — never hardcode API keys in URL/headers\nSet User-Agent for debugging on the receiving end\nUse $env.VARIABLE for base URLs — never hardcode domains\nFull response mode when you need status code for branching"
      },
      {
        "title": "4.2 Code Node — Data Transformation Patterns",
        "body": "Pattern: Map and Transform\n\n// Transform array of items\nreturn items.map(item => {\n  const data = item.json;\n  return {\n    json: {\n      id: data.id,\n      fullName: `${data.first_name} ${data.last_name}`.trim(),\n      email: data.email?.toLowerCase(),\n      createdAt: new Date(data.created_at).toISOString(),\n      source: 'n8n-sync',\n      // Computed fields\n      isActive: data.status === 'active',\n      daysSinceSignup: Math.floor(\n        (Date.now() - new Date(data.created_at)) / 86400000\n      ),\n    }\n  };\n});\n\nPattern: Filter + Deduplicate\n\nconst seen = new Set();\nreturn items.filter(item => {\n  const key = item.json.email?.toLowerCase();\n  if (!key || seen.has(key)) return false;\n  seen.add(key);\n  return true;\n});\n\nPattern: Aggregate / Group By\n\nconst groups = {};\nfor (const item of items) {\n  const key = item.json.category;\n  if (!groups[key]) groups[key] = { count: 0, total: 0, items: [] };\n  groups[key].count++;\n  groups[key].total += item.json.amount || 0;\n  groups[key].items.push(item.json);\n}\nreturn Object.entries(groups).map(([category, data]) => ({\n  json: { category, ...data, average: data.total / data.count }\n}));\n\nPattern: Pagination Handler\n\n// Use with Loop Over Items or recursive sub-workflow\nconst baseUrl = $env.API_BASE_URL;\nconst results = [];\nlet page = 1;\nlet hasMore = true;\n\nwhile (hasMore) {\n  const response = await this.helpers.httpRequest({\n    method: 'GET',\n    url: `${baseUrl}/items?page=${page}&per_page=100`,\n    headers: { 'Authorization': `Bearer ${$env.API_TOKEN}` },\n  });\n  \n  results.push(...response.data);\n  hasMore = response.data.length === 100;\n  page++;\n  \n  // Safety valve\n  if (page > 50) break;\n}\n\nreturn results.map(item => ({ json: item }));\n\nPattern: Rate Limiter\n\n// Add between batch items to respect API limits\nconst RATE_LIMIT_MS = 200; // 5 requests per second\nconst itemIndex = $itemIndex || 0;\n\nif (itemIndex > 0) {\n  await new Promise(resolve => setTimeout(resolve, RATE_LIMIT_MS));\n}\n\nreturn items;"
      },
      {
        "title": "4.3 Branching Patterns",
        "body": "IF Node — Decision Matrix\n\nbranching_patterns:\n  binary_decision:\n    node: \"IF\"\n    use: \"True/false routing\"\n    example: \"Is order amount > $100?\"\n    \n  multi_path:\n    node: \"Switch\"\n    use: \"3+ possible routes\"\n    example: \"Route by ticket priority (P0/P1/P2/P3)\"\n    \n  content_routing:\n    node: \"Switch\"\n    use: \"Route by data content/type\"\n    example: \"Route by email domain to different CRMs\"\n    \n  merge_paths:\n    node: \"Merge\"\n    mode: \"chooseBranch\"\n    use: \"Rejoin after IF/Switch branches\"\n\nSwitch Node — Clean Multi-Routing\n\nSwitch on: {{ $json.status }}\n  Case \"new\"      → Create record path\n  Case \"updated\"  → Update record path  \n  Case \"deleted\"  → Archive record path\n  Default         → Log unknown status + alert"
      },
      {
        "title": "4.4 Loop Patterns",
        "body": "Split In Batches — Batch Processing\n\nbatch_processing:\n  node: \"Split In Batches\"\n  batch_size: 10\n  use_cases:\n    - \"API with rate limits (process 10, wait, next 10)\"\n    - \"Database bulk inserts (batch of 100)\"\n    - \"Email sending (batch of 50 to avoid spam filters)\"\n  \n  pattern:\n    1: \"Split In Batches (size: 10)\"\n    2: \"→ Process batch (HTTP Request / DB insert)\"\n    3: \"→ Wait (1 second between batches)\"\n    4: \"→ Loop back to Split In Batches\"\n\nLoop Over Items — Per-Item Processing\n\nper_item_loop:\n  node: \"Loop Over Items\"\n  use_cases:\n    - \"Each item needs different API call\"\n    - \"Sequential processing required (order matters)\"\n    - \"Per-item error handling needed\"\n  \n  anti_pattern: \"Don't loop when batch/bulk API exists\""
      },
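      {
        "title": "4.4a Sketch: Manual Chunk + Throttle in a Code Node",
        "body": "When Split In Batches is more machinery than the job needs (one API, simple loop), the same batch-and-wait behavior can live in a single Code node. A hedged sketch with illustrative sizing:\n\n// Sketch: chunk items and pause between batches inside one\n// Code node. BATCH_SIZE and PAUSE_MS are illustrative values.\nconst BATCH_SIZE = 10;\nconst PAUSE_MS = 1000;\nconst out = [];\n\nfor (let i = 0; i < items.length; i += BATCH_SIZE) {\n  const batch = items.slice(i, i + BATCH_SIZE);\n  // ... process the batch here (e.g. this.helpers.httpRequest) ...\n  out.push(...batch);\n  if (i + BATCH_SIZE < items.length) {\n    await new Promise(r => setTimeout(r, PAUSE_MS));\n  }\n}\n\nreturn out;"
      },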
      {
        "title": "5.1 Error Handling Strategy",
        "body": "Every production workflow MUST have:\n\n┌─────────────────────────────────────────────────┐\n│  MAIN WORKFLOW                                   │\n│                                                  │\n│  Trigger → Process → Output                      │\n│     │                                            │\n│     └─── Error Trigger ──→ Error Handler ──→     │\n│              │                                   │\n│              ├── Log error details                │\n│              ├── Send alert (Slack/email)         │\n│              ├── Retry logic (if applicable)      │\n│              └── Dead letter queue (if needed)    │\n└─────────────────────────────────────────────────┘"
      },
      {
        "title": "5.2 Error Trigger Template",
        "body": "error_workflow:\n  nodes:\n    - name: \"Error Trigger\"\n      type: \"n8n-nodes-base.errorTrigger\"\n      \n    - name: \"Extract Error Info\"\n      type: \"n8n-nodes-base.code\"\n      code: |\n        const error = $json;\n        return [{\n          json: {\n            workflow_name: error.workflow?.name || 'Unknown',\n            workflow_id: error.workflow?.id,\n            execution_id: error.execution?.id,\n            error_message: error.execution?.error?.message || 'No message',\n            error_node: error.execution?.error?.node || 'Unknown node',\n            timestamp: new Date().toISOString(),\n            retry_url: `${$env.N8N_BASE_URL}/workflow/${error.workflow?.id}/executions/${error.execution?.id}`,\n            severity: classifySeverity(error),\n          }\n        }];\n        \n        function classifySeverity(error) {\n          const msg = error.execution?.error?.message || '';\n          if (msg.includes('timeout') || msg.includes('ECONNREFUSED')) return 'WARNING';\n          if (msg.includes('401') || msg.includes('403')) return 'CRITICAL';\n          if (msg.includes('429')) return 'INFO'; // Rate limit, will retry\n          return 'ERROR';\n        }\n        \n    - name: \"Alert via Slack\"\n      type: \"n8n-nodes-base.slack\"\n      action: \"Send message\"\n      channel: \"#n8n-alerts\"\n      message: |\n        🚨 *n8n Workflow Error*\n        \n        *Workflow:* {{ $json.workflow_name }}\n        *Node:* {{ $json.error_node }}\n        *Severity:* {{ $json.severity }}\n        *Error:* {{ $json.error_message }}\n        *Time:* {{ $json.timestamp }}\n        \n        <{{ $json.retry_url }}|View Execution>"
      },
      {
        "title": "5.3 Retry Patterns",
        "body": "retry_strategies:\n  http_retry:\n    description: \"Built-in HTTP Request retry\"\n    config:\n      max_retries: 3\n      retry_interval: 1000  # ms\n      retry_on_timeout: true\n      retry_on_status: [429, 500, 502, 503, 504]\n    \n  custom_retry_with_backoff:\n    description: \"Code node implementing exponential backoff\"\n    pattern: |\n      const maxRetries = 3;\n      const attempt = $json._retryAttempt || 0;\n      \n      if (attempt >= maxRetries) {\n        // Send to dead letter queue\n        return [{ json: { ...item.json, _failed: true, _attempts: attempt } }];\n      }\n      \n      const delay = Math.pow(2, attempt) * 1000; // 1s, 2s, 4s\n      await new Promise(r => setTimeout(r, delay));\n      \n      return [{ json: { ...item.json, _retryAttempt: attempt + 1 } }];\n      \n  circuit_breaker:\n    description: \"Stop calling failing service\"\n    pattern: |\n      // Use n8n static data as circuit state\n      const staticData = $getWorkflowStaticData('global');\n      const failures = staticData.failures || 0;\n      const lastFailure = staticData.lastFailure || 0;\n      const THRESHOLD = 5;\n      const COOLDOWN_MS = 300000; // 5 minutes\n      \n      if (failures >= THRESHOLD && Date.now() - lastFailure < COOLDOWN_MS) {\n        // Circuit OPEN — skip API call, use fallback\n        return [{ json: { _circuitOpen: true, _fallback: true } }];\n      }"
      },
      {
        "title": "5.4 Dead Letter Queue Pattern",
        "body": "dead_letter_queue:\n  purpose: \"Store failed items for manual review/reprocessing\"\n  implementation:\n    - node: \"Google Sheets / Airtable / Database\"\n      columns: [workflow, execution_id, item_data, error, timestamp, status]\n    - status_values: [pending, retrying, resolved, abandoned]\n    - review: \"Check DLQ daily, resolve or abandon stale items\""
      },
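      {
        "title": "5.4a Sketch: Shaping a Failed Item into a DLQ Row",
        "body": "A minimal sketch of a Code node that maps a failed item onto the DLQ columns listed above; _error is a hypothetical property set by your error path, and the actual insert happens in a downstream Postgres/Sheets/Airtable node.\n\n// Sketch: one DLQ row per failed item, matching\n// [workflow, execution_id, item_data, error, timestamp, status].\nreturn items.map(item => ({\n  json: {\n    workflow: $workflow.name,\n    execution_id: $execution.id,\n    item_data: JSON.stringify(item.json),\n    error: item.json._error || 'unknown', // hypothetical field from the error path\n    timestamp: new Date().toISOString(),\n    status: 'pending',\n  }\n}));"
      },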
      {
        "title": "6.1 Common Integration Patterns",
        "body": "Pattern: CRM Sync (Bidirectional)\n\ncrm_sync:\n  inbound:\n    trigger: \"Webhook from CRM (new/updated contact)\"\n    steps:\n      1: \"Validate payload schema\"\n      2: \"Map fields to internal format\"\n      3: \"Deduplicate (check by email)\"\n      4: \"Upsert to database\"\n      5: \"Trigger downstream workflows\"\n      \n  outbound:\n    trigger: \"Database change or schedule\"\n    steps:\n      1: \"Query changed records since last sync\"\n      2: \"Map internal format to CRM fields\"\n      3: \"Batch upsert to CRM API\"\n      4: \"Store sync timestamp\"\n      5: \"Log sync results\"\n      \n  conflict_resolution:\n    strategy: \"Last write wins with audit trail\"\n    timestamp_field: \"updated_at\"\n    audit: \"Log both versions before overwrite\"\n\nPattern: Email Processing Pipeline\n\nemail_pipeline:\n  trigger: \"IMAP Email (polling every 5 min)\"\n  steps:\n    1: \"Read new emails\"\n    2: \"Classify intent (AI/rules)\"\n    3: \"Extract structured data (sender, subject, key fields)\"\n    4: \"Route by classification\"\n    5_support: \"Create ticket in helpdesk\"\n    5_sales: \"Add to CRM as lead\"\n    5_billing: \"Forward to accounting\"\n    5_spam: \"Archive and skip\"\n    6: \"Send auto-acknowledgment\"\n    7: \"Log to audit trail\"\n\nPattern: Multi-Step Approval\n\napproval_workflow:\n  trigger: \"Form/webhook (new request)\"\n  steps:\n    1: \"Create request record (status: pending)\"\n    2: \"Send Slack message with Approve/Reject buttons\"\n    3: \"Wait for webhook callback (button click)\"\n    4_approved: \"Execute action + notify requester\"\n    4_rejected: \"Notify requester with reason\"\n    5: \"Update request status\"\n    6: \"Log to audit trail\"\n  timeout: \"48 hours → auto-escalate to manager\"\n\nPattern: AI-Powered Processing\n\nai_pipeline:\n  trigger: \"Webhook or schedule\"\n  steps:\n    1: \"Receive raw data (text, email, document)\"\n    2: \"Pre-process (clean, chunk if needed)\"\n    3: \"Send to LLM (OpenAI/Anthropic/local)\"\n    4: \"Parse structured response\"\n    5: \"Validate LLM output (check required fields, format)\"\n    6: \"Route based on classification\"\n    7: \"Human review if confidence < threshold\"\n    8: \"Store result + feedback for improvement\"\n  \n  llm_node_config:\n    model: \"gpt-4o-mini for classification, gpt-4o for generation\"\n    temperature: 0 for extraction/classification, 0.7 for generation\n    max_tokens: \"Set explicit limit to control cost\"\n    system_prompt: \"Be specific. Include output format. Add examples.\"\n    \n  cost_control:\n    - \"Use cheapest model that achieves accuracy target\"\n    - \"Cache repeated queries (check before calling LLM)\"\n    - \"Batch similar items into single LLM call when possible\"\n    - \"Track cost per execution in workflow metrics\""
      },
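      {
        "title": "6.1a Sketch: Validating LLM Output (Step 5)",
        "body": "Step 5 of the AI pipeline above (\"Validate LLM output\") is worth spelling out. A sketch, assuming the model was prompted to return JSON; the field holding the response text varies by LLM node and version, so the first line is an assumption to adjust.\n\n// Sketch: parse and validate an LLM JSON response.\n// Assumption: the response text lands in $json.message.content\n// or $json.text; check your node's actual output shape.\nconst raw = $json.message?.content ?? $json.text ?? '';\n// Strip markdown code fences the model may have wrapped around the JSON\nconst cleaned = raw.replace(/^```(json)?\\s*/i, '').replace(/```\\s*$/, '').trim();\n\nlet parsed;\ntry {\n  parsed = JSON.parse(cleaned);\n} catch (e) {\n  throw new Error(`LLM returned non-JSON output: ${cleaned.slice(0, 200)}`);\n}\n\nfor (const field of ['category', 'priority', 'summary']) {\n  if (!(field in parsed)) throw new Error(`LLM output missing \"${field}\"`);\n}\n\nreturn [{ json: parsed }];"
      },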
      {
        "title": "6.2 Data Mapping Cheat Sheet",
        "body": "// Common field mapping patterns in Code nodes\n\n// Dates — always normalize to ISO\nconst isoDate = new Date(data.date_field).toISOString();\nconst dateOnly = new Date(data.date_field).toISOString().split('T')[0];\n\n// Names\nconst fullName = `${data.firstName || ''} ${data.lastName || ''}`.trim();\nconst [firstName, ...rest] = data.fullName.split(' ');\nconst lastName = rest.join(' ');\n\n// Currency — always store as cents/minor units\nconst amountCents = Math.round(parseFloat(data.amount) * 100);\nconst amountDisplay = (data.amount_cents / 100).toFixed(2);\n\n// Phone — normalize\nconst phone = data.phone?.replace(/\\D/g, '');\n\n// Email — normalize\nconst email = data.email?.toLowerCase().trim();\n\n// Null safety\nconst value = data.field ?? 'default';\nconst nested = data.parent?.child?.value ?? null;\n\n// Array handling\nconst tags = Array.isArray(data.tags) ? data.tags : [data.tags].filter(Boolean);\nconst csvToArray = data.csv_field?.split(',').map(s => s.trim()) || [];\nconst arrayToCsv = data.array_field?.join(', ') || '';"
      },
      {
        "title": "7.1 When to Extract Sub-Workflows",
        "body": "SignalActionSame logic in 3+ workflowsExtract to sub-workflowWorkflow > 30 nodesDecompose into main + sub-workflowsDifferent error handling neededSeparate error domainsTeam wants to reuse a processMake it a callable sub-workflowNeed to test a section independentlyExtract and test separately"
      },
      {
        "title": "7.2 Sub-Workflow Design Rules",
        "body": "sub_workflow_rules:\n  naming: \"[SUB] Description — Input/Output\"\n  interface:\n    - \"Define clear input schema (what data it expects)\"\n    - \"Define clear output schema (what it returns)\"\n    - \"Document side effects (external API calls, DB writes)\"\n  \n  input_validation:\n    - \"First node: validate required fields exist\"\n    - \"Return clear error if validation fails\"\n    \n  output_contract:\n    - \"Always return consistent structure\"\n    - \"Include success/failure status\"\n    - \"Include execution metadata (duration, items processed)\"\n    \n  example_output:\n    success: true\n    items_processed: 42\n    errors: []\n    duration_ms: 1234"
      },
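      {
        "title": "7.2a Sketch: Input Validation First Node",
        "body": "A sketch of the validate-first rule above; REQUIRED is a placeholder for whatever the sub-workflow's documented input schema demands.\n\n// Sketch: fail fast with a clear error when required input\n// fields are missing. Field names here are hypothetical.\nconst REQUIRED = ['email', 'orderId'];\n\nreturn items.map(item => {\n  const missing = REQUIRED.filter(f => item.json[f] == null);\n  if (missing.length) {\n    throw new Error(`Sub-workflow input missing: ${missing.join(', ')}`);\n  }\n  return item;\n});"
      },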
      {
        "title": "7.3 Orchestrator Pattern",
        "body": "[PROCESS] Order Fulfillment — Orchestrator (v1.0)\n  │\n  ├── [SUB] Validate Order — Input Check\n  │     └── Returns: { valid: true/false, errors: [] }\n  │\n  ├── [SUB] Check Inventory — Stock Verification  \n  │     └── Returns: { inStock: true/false, items: [] }\n  │\n  ├── [SUB] Process Payment — Stripe Charge\n  │     └── Returns: { charged: true/false, chargeId: \"\" }\n  │\n  ├── [SUB] Create Shipment — Shipping Label\n  │     └── Returns: { trackingNumber: \"\", labelUrl: \"\" }\n  │\n  └── [SUB] Send Confirmations — Email + SMS\n        └── Returns: { emailSent: true, smsSent: true }\n\nOrchestrator handles:\n  - Sequential execution order\n  - Rollback on failure (reverse previous steps)\n  - Status tracking (store state between steps)\n  - Timeout management (overall SLA)"
      },
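      {
        "title": "7.3a Sketch: Rollback Dispatch on Failure",
        "body": "One way to implement \"rollback on failure\" from the list above: each sub-workflow appends its step name to the item's state, and the error path emits compensating actions in reverse order. Step and action names below are illustrative.\n\n// Sketch: map completed steps to compensating actions, newest\n// first; route each emitted item to its rollback sub-workflow.\nconst completed = $json._state?.completed || []; // e.g. ['process_payment', 'create_shipment']\nconst compensations = {\n  process_payment: 'refund_payment',\n  create_shipment: 'void_label',\n};\n\nreturn [...completed].reverse()\n  .map(step => compensations[step])\n  .filter(Boolean)\n  .map(action => ({ json: { action, orderId: $json.orderId } }));"
      },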
      {
        "title": "8.1 Static Data Patterns",
        "body": "// Global static data (persists across executions)\nconst staticData = $getWorkflowStaticData('global');\n\n// Pattern: Last processed ID (for incremental sync)\nconst lastId = staticData.lastProcessedId || 0;\n// ... process items where id > lastId ...\nstaticData.lastProcessedId = maxProcessedId;\n\n// Pattern: Rate limit tracking\nstaticData.apiCalls = (staticData.apiCalls || 0) + 1;\nstaticData.windowStart = staticData.windowStart || Date.now();\nif (Date.now() - staticData.windowStart > 3600000) {\n  staticData.apiCalls = 1;\n  staticData.windowStart = Date.now();\n}\n\n// Pattern: Deduplication cache\nconst cache = staticData.processedIds || {};\nconst newItems = items.filter(item => {\n  if (cache[item.json.id]) return false;\n  cache[item.json.id] = Date.now();\n  return true;\n});\n// Prune cache entries older than 24h\nfor (const [id, ts] of Object.entries(cache)) {\n  if (Date.now() - ts > 86400000) delete cache[id];\n}\nstaticData.processedIds = cache;"
      },
      {
        "title": "8.2 External State (When Static Data Isn't Enough)",
        "body": "state_management:\n  static_data:\n    capacity: \"~1MB per workflow\"\n    persistence: \"Survives restarts\"\n    use_for: \"Counters, last-processed IDs, small caches\"\n    dont_use_for: \"Large datasets, shared state between workflows\"\n    \n  database:\n    use_for: \"Shared state, large datasets, audit trails\"\n    options: [\"Postgres\", \"SQLite\", \"Redis\"]\n    pattern: \"Read state → Process → Write state (in same execution)\"\n    \n  google_sheets:\n    use_for: \"Human-readable state, manual override capability\"\n    pattern: \"Config sheet = feature flags, processing rules\"\n    \n  redis:\n    use_for: \"High-speed counters, distributed locks, pub/sub\"\n    pattern: \"Rate limiting, dedup across multiple workflows\""
      },
      {
        "title": "9.1 Credential Management Rules",
        "body": "credential_rules:\n  DO:\n    - \"Use n8n Credential Store for ALL secrets\"\n    - \"Use environment variables for config (URLs, feature flags)\"\n    - \"Rotate API keys on schedule (quarterly minimum)\"\n    - \"Use OAuth2 over API keys when available\"\n    - \"Limit credential scope (least privilege)\"\n    - \"Audit credential usage quarterly\"\n    \n  NEVER:\n    - \"Hardcode secrets in Code nodes\"\n    - \"Put API keys in webhook URLs\"\n    - \"Log full request/response bodies (may contain secrets)\"\n    - \"Share credentials between dev/staging/prod\"\n    - \"Use personal API keys for production workflows\""
      },
      {
        "title": "9.2 Webhook Security Implementation",
        "body": "// HMAC signature verification (Stripe, GitHub, etc.)\nconst crypto = require('crypto');\n\nconst signature = $request.headers['x-hub-signature-256'];\nconst secret = $env.WEBHOOK_SECRET;\nconst body = JSON.stringify($json);\n\nconst expected = 'sha256=' + crypto\n  .createHmac('sha256', secret)\n  .update(body)\n  .digest('hex');\n\nif (signature !== expected) {\n  // Return 401 via Respond to Webhook node\n  return [{ json: { error: 'Invalid signature', _reject: true } }];\n}\n\nreturn items;"
      },
      {
        "title": "9.3 Data Privacy Checklist",
        "body": "privacy_checklist:\n  pii_handling:\n    - \"Identify PII fields in every workflow (email, name, phone, IP)\"\n    - \"Minimize PII: only pass fields actually needed\"\n    - \"Mask PII in logs (email → j***@example.com)\"\n    - \"Set execution data pruning (don't keep PII forever)\"\n    \n  execution_data:\n    - \"Save execution data: Only on error (production)\"\n    - \"Save execution data: Always (development only)\"\n    - \"Prune executions older than 30 days\"\n    - \"Don't store full response bodies from external APIs\"\n    \n  compliance:\n    - \"GDPR: Can you delete a user's data from all workflow states?\"\n    - \"Audit trail: Can you prove what data was processed and when?\"\n    - \"Data residency: Are API calls going to correct region?\""
      },
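      {
        "title": "9.3a Sketch: Masking PII Before Logging",
        "body": "A small sketch of the \"mask PII in logs\" rule above (email → j***@example.com); place it immediately before any logging or audit node.\n\n// Sketch: mask email and phone before items reach a log sink.\nfunction maskEmail(email) {\n  if (!email || !email.includes('@')) return email;\n  const [user, domain] = email.split('@');\n  return `${user[0]}***@${domain}`;\n}\n\nreturn items.map(item => ({\n  json: {\n    ...item.json,\n    email: maskEmail(item.json.email),\n    phone: item.json.phone ? `***${String(item.json.phone).slice(-4)}` : item.json.phone,\n  }\n}));"
      },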
      {
        "title": "10.1 Performance Optimization Priority Stack",
        "body": "PriorityTechniqueImpact1Batch API calls (bulk endpoints)10-100x fewer API calls2Parallel execution (split + merge)2-5x faster processing3Filter early (drop items before heavy processing)Reduces compute4Cache repeated lookups (static data)Fewer API calls5Minimize data passed between nodesReduces memory6Use sub-workflows for heavy sectionsBetter resource management7Schedule during off-peak hoursReduces contention8Optimize Code node algorithmsReduces CPU time"
      },
      {
        "title": "10.2 Batch Processing Template",
        "body": "batch_template:\n  step_1: \"Collect all items (trigger / query)\"\n  step_2: \"Split In Batches (size based on API limit)\"\n  step_3: \"Process batch (use bulk/batch API endpoint)\"\n  step_4: \"Wait node (respect rate limit between batches)\"\n  step_5: \"Aggregate results\"\n  step_6: \"Report summary\"\n  \n  sizing_guide:\n    stripe_api: 100  # Stripe list limit\n    hubspot_api: 100  # HubSpot batch limit\n    postgres_insert: 1000  # Comfortable batch insert\n    email_send: 50  # Avoid spam filters\n    slack_api: 20  # Rate limit friendly\n    openai_api: 1  # Usually per-request"
      },
      {
        "title": "10.3 Memory Optimization",
        "body": "// Anti-pattern: Passing full objects through entire workflow\n// ❌ BAD\nreturn items; // Each item has 50 fields, only need 3\n\n// ✅ GOOD: Extract only needed fields early\nreturn items.map(item => ({\n  json: {\n    id: item.json.id,\n    email: item.json.email,\n    status: item.json.status,\n  }\n}));\n\n// Anti-pattern: Accumulating in memory\n// ❌ BAD: Loading 100K records into Code node\n// ✅ GOOD: Use database queries with LIMIT/OFFSET, process in batches"
      },
      {
        "title": "11.1 Testing Methodology",
        "body": "testing_levels:\n  unit_test:\n    what: \"Individual nodes with sample data\"\n    how: \"Pin test data on trigger node, execute single node\"\n    when: \"Building each node\"\n    \n  integration_test:\n    what: \"Full workflow with test data\"\n    how: \"Manual trigger with test payload, verify all outputs\"\n    when: \"Before activating\"\n    \n  smoke_test:\n    what: \"Quick check that workflow still works\"\n    how: \"Trigger with minimal valid payload, check success\"\n    when: \"After any change, weekly health check\"\n    \n  load_test:\n    what: \"Performance under volume\"\n    how: \"Send 100+ items through, measure time and errors\"\n    when: \"Before scaling to production volume\""
      },
      {
        "title": "11.2 Debugging Checklist",
        "body": "debugging_steps:\n  1_reproduce:\n    - \"Find the failed execution in execution list\"\n    - \"Check which node failed (red highlight)\"\n    - \"Read the error message carefully\"\n    \n  2_inspect:\n    - \"Check input data to failed node (is it what you expected?)\"\n    - \"Check node configuration (expressions resolving correctly?)\"\n    - \"Check credentials (still valid? permissions?)\"\n    \n  3_common_fixes:\n    expression_error: \"Wrap in try/catch or use ?? for null safety\"\n    timeout: \"Increase timeout, check if API is actually up\"\n    auth_error: \"Re-authenticate credential, check token expiry\"\n    rate_limit: \"Add Wait node, reduce batch size\"\n    json_parse: \"Check response is actually JSON (not HTML error page)\"\n    missing_field: \"Data shape changed — update field mapping\"\n    \n  4_isolate:\n    - \"Pin input data on the failing node\"\n    - \"Execute just that node\"\n    - \"If it works in isolation, problem is upstream data\""
      },
      {
        "title": "11.3 Monitoring Dashboard",
        "body": "monitoring:\n  metrics_to_track:\n    - name: \"Execution success rate\"\n      target: \">99%\"\n      alert_threshold: \"<95%\"\n      \n    - name: \"Average execution time\"\n      target: \"Under SLA\"\n      alert_threshold: \">2x normal\"\n      \n    - name: \"Items processed per run\"\n      target: \"Expected range\"\n      alert_threshold: \"0 items (nothing processed) or >10x normal\"\n      \n    - name: \"Error frequency by type\"\n      target: \"Decreasing trend\"\n      alert_threshold: \"Same error >3 times in 24h\"\n      \n    - name: \"API quota usage\"\n      target: \"<80% of limit\"\n      alert_threshold: \">90% of limit\"\n      \n  health_check_workflow:\n    schedule: \"Every 30 minutes\"\n    checks:\n      - \"Can reach external APIs? (HEAD request)\"\n      - \"Database connection alive?\"\n      - \"Disk space for execution data?\"\n      - \"Any workflows stuck in 'running' >1 hour?\"\n    alert_channel: \"Slack #n8n-alerts\""
      },
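      {
        "title": "11.3a Sketch: API Health Probe Node",
        "body": "The health-check workflow's \"can reach external APIs?\" step can be a single Code node. A sketch using this.helpers.httpRequest (already used in the pagination pattern above); the target names and env vars are placeholders.\n\n// Sketch: HEAD-probe each dependency with a short timeout and\n// emit one result item per target for the alerting branch.\nconst targets = [\n  { name: 'crm', url: $env.CRM_BASE_URL },         // placeholder\n  { name: 'billing', url: $env.BILLING_BASE_URL }, // placeholder\n];\n\nconst results = [];\nfor (const t of targets) {\n  try {\n    await this.helpers.httpRequest({ method: 'HEAD', url: t.url, timeout: 5000 });\n    results.push({ name: t.name, healthy: true });\n  } catch (e) {\n    results.push({ name: t.name, healthy: false, error: e.message });\n  }\n}\n\nreturn results.map(r => ({ json: r }));"
      },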
      {
        "title": "12.1 Deployment Checklist",
        "body": "pre_activation:\n  workflow:\n    - [ ] \"Workflow description filled in (purpose, owner, version)\"\n    - [ ] \"All nodes named descriptively (not 'HTTP Request 1')\"\n    - [ ] \"Sticky notes explain complex sections\"\n    - [ ] \"Error trigger workflow connected\"\n    - [ ] \"Test data pins removed\"\n    - [ ] \"No hardcoded secrets or URLs\"\n    - [ ] \"Environment variables used for config\"\n    \n  testing:\n    - [ ] \"Happy path tested with real-shape data\"\n    - [ ] \"Error paths tested (bad data, API failure, timeout)\"\n    - [ ] \"Edge cases tested (empty array, null fields, special chars)\"\n    - [ ] \"Load tested at expected volume\"\n    \n  operations:\n    - [ ] \"Execution data retention configured\"\n    - [ ] \"Alert channel receiving error notifications\"\n    - [ ] \"Runbook written for common failure scenarios\"\n    - [ ] \"Owner documented (who to page at 3 AM)\""
      },
      {
        "title": "12.2 Workflow Versioning Strategy",
        "body": "versioning:\n  format: \"vMAJOR.MINOR (in workflow name + description)\"\n  \n  major_bump: \"Breaking changes — new trigger, changed output format\"\n  minor_bump: \"Improvements — new fields, better error handling\"\n  \n  changelog_location: \"Workflow description field\"\n  changelog_format: |\n    ## v2.1 (2024-03-15)\n    - Added retry logic for Stripe API calls\n    - Fixed timezone conversion for EU customers\n    \n    ## v2.0 (2024-02-01)\n    - Migrated from REST to GraphQL API\n    - Breaking: output format changed\n    \n  backup_strategy:\n    - \"Export workflow JSON before major changes\"\n    - \"Store in git repo: workflows/[category]/[name].json\"\n    - \"Tag with version: git tag workflow-name-v2.1\""
      },
      {
        "title": "12.3 Maintenance Schedule",
        "body": "maintenance:\n  daily:\n    - \"Check error notifications channel\"\n    - \"Review failed executions (>0 = investigate)\"\n    \n  weekly:\n    - \"Review execution volume trends\"\n    - \"Check API quota usage\"\n    - \"Process dead letter queue items\"\n    \n  monthly:\n    - \"Review and prune old executions\"\n    - \"Audit credential usage\"\n    - \"Update workflow documentation\"\n    - \"Review performance (any slow workflows?)\"\n    \n  quarterly:\n    - \"Rotate API keys and tokens\"\n    - \"Review all active workflows — still needed?\"\n    - \"Update n8n version (test in staging first)\"\n    - \"Archive unused workflows\""
      },
      {
        "title": "13.1 Template: Lead Capture → CRM → Notification",
        "body": "name: \"[INGEST] Web Lead → HubSpot + Slack Alert (v1.0)\"\ntrigger: Webhook (form submission)\nnodes:\n  1_webhook:\n    type: Webhook\n    path: \"/lead-capture\"\n    method: POST\n    response: \"Respond to Webhook (immediate 200)\"\n    \n  2_validate:\n    type: IF\n    condition: \"email exists AND email contains @\"\n    false_path: \"→ Log invalid submission → End\"\n    \n  3_enrich:\n    type: HTTP Request\n    url: \"Clearbit/Apollo enrichment API\"\n    fallback: \"Continue without enrichment\"\n    \n  4_dedupe:\n    type: Code\n    logic: \"Check HubSpot for existing contact by email\"\n    \n  5_create_or_update:\n    type: HubSpot\n    action: \"Create/update contact\"\n    fields: [email, name, company, source, enrichment_data]\n    \n  6_notify:\n    type: Slack\n    channel: \"#sales-leads\"\n    message: \"🎯 New lead: {name} from {company} — {source}\"\n    \n  7_auto_reply:\n    type: Email (SMTP)\n    to: \"{{ $json.email }}\"\n    template: \"Thanks for your interest, we'll be in touch within 24h\""
      },
      {
        "title": "13.2 Template: Scheduled Report Generator",
        "body": "name: \"[EXPORT] Weekly Sales Report — Email (v1.0)\"\ntrigger: Schedule (Monday 8 AM)\nnodes:\n  1_schedule:\n    type: Schedule Trigger\n    cron: \"0 8 * * 1\"\n    \n  2_query_data:\n    type: Postgres\n    query: |\n      SELECT \n        date_trunc('day', created_at) as day,\n        COUNT(*) as deals,\n        SUM(amount) as revenue,\n        AVG(amount) as avg_deal\n      FROM deals \n      WHERE created_at >= NOW() - INTERVAL '7 days'\n      GROUP BY 1 ORDER BY 1\n      \n  3_calculate_summary:\n    type: Code\n    logic: \"Calculate totals, WoW change, top deals\"\n    \n  4_format_report:\n    type: Code\n    logic: \"Generate HTML email body with tables and charts links\"\n    \n  5_send_email:\n    type: Email (SMTP)\n    to: \"sales-team@company.com\"\n    subject: \"📊 Weekly Sales Report — W{{ weekNumber }}\"\n    html: \"{{ $json.reportHtml }}\""
      },
      {
        "title": "13.3 Template: AI Support Ticket Classifier",
        "body": "name: \"[AI] Support Ticket — Classify + Route (v1.0)\"\ntrigger: Webhook (helpdesk new ticket)\nnodes:\n  1_webhook:\n    type: Webhook\n    \n  2_classify:\n    type: OpenAI Chat\n    model: \"gpt-4o-mini\"\n    system: |\n      Classify this support ticket. Return JSON:\n      {\n        \"category\": \"bug|feature_request|billing|how_to|account|other\",\n        \"priority\": \"P0|P1|P2|P3\",\n        \"sentiment\": \"angry|frustrated|neutral|positive\",\n        \"summary\": \"one sentence summary\",\n        \"suggested_response\": \"draft response\"\n      }\n    temperature: 0\n    \n  3_parse:\n    type: Code\n    logic: \"JSON.parse response, validate required fields\"\n    \n  4_route:\n    type: Switch\n    on: \"{{ $json.category }}\"\n    cases:\n      bug: \"→ Assign to engineering team\"\n      billing: \"→ Assign to finance team\"\n      feature_request: \"→ Add to product backlog\"\n      default: \"→ Assign to general support\"\n      \n  5_priority_alert:\n    type: IF\n    condition: \"priority == P0\"\n    true_path: \"→ Slack alert to on-call\"\n    \n  6_update_ticket:\n    type: HTTP Request\n    action: \"Update ticket with classification tags\"\n    \n  7_auto_respond:\n    type: IF\n    condition: \"category == how_to AND confidence > 0.9\"\n    true_path: \"→ Send suggested_response as reply\"\n    false_path: \"→ Save draft for human review\""
      },
      {
        "title": "13.4 Template: Multi-System Data Sync",
        "body": "name: \"[SYNC] Stripe → Postgres → HubSpot — Payments (v1.0)\"\ntrigger: Webhook (Stripe payment_intent.succeeded)\nnodes:\n  1_webhook:\n    type: Webhook\n    security: \"HMAC signature verification\"\n    \n  2_verify_signature:\n    type: Code\n    logic: \"Stripe HMAC verification\"\n    \n  3_extract_payment:\n    type: Code\n    logic: \"Extract customer, amount, metadata from Stripe event\"\n    \n  4_upsert_db:\n    type: Postgres\n    action: \"INSERT ON CONFLICT UPDATE\"\n    table: \"payments\"\n    \n  5_update_crm:\n    type: HubSpot\n    action: \"Update deal stage to 'Closed Won'\"\n    \n  6_notify_team:\n    type: Slack\n    message: \"💰 Payment received: ${{ amount }} from {{ customer }}\"\n    \n  7_send_receipt:\n    type: Email (SMTP)\n    to: \"{{ customer_email }}\"\n    template: \"Payment confirmation\""
      },
      {
        "title": "14.1 Fan-Out / Fan-In (Parallel Processing)",
        "body": "pattern: \"Split work across parallel paths, merge results\"\nuse_case: \"Enrich contacts from 3 APIs simultaneously\"\nimplementation:\n  1: \"Trigger with batch of contacts\"\n  2: \"Split into 3 parallel HTTP Request nodes\"\n  3: \"Each calls different API (Clearbit, Apollo, LinkedIn)\"\n  4: \"Merge node (Combine mode) joins results\"\n  5: \"Code node merges enrichment data per contact\"\n  \nbenefit: \"3x faster than sequential API calls\"\ncaveat: \"All 3 branches must handle their own errors\""
      },
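      {
        "title": "14.1a Sketch: Merging Parallel Enrichment Results",
        "body": "Step 5 above (\"Code node merges enrichment data per contact\") could look like this sketch; it assumes every branch's items carry the contact's email as the join key.\n\n// Sketch: fold items from all merged branches into one record\n// per contact, keyed by normalized email.\nconst byEmail = {};\nfor (const item of items) {\n  const key = item.json.email?.toLowerCase();\n  if (!key) continue; // skip items without the join key\n  byEmail[key] = { ...(byEmail[key] || {}), ...item.json };\n}\n\nreturn Object.values(byEmail).map(json => ({ json }));"
      },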
      {
        "title": "14.2 Event-Driven Architecture",
        "body": "pattern: \"Workflows trigger other workflows via internal webhooks\"\nimplementation:\n  producer: |\n    [PROCESS] Order Created\n    → Process order\n    → HTTP Request to internal webhook: /event/order-created\n    \n  consumers:\n    - \"[NOTIFY] Order Confirmation → Email\"\n    - \"[SYNC] Order → Inventory Update\"  \n    - \"[SYNC] Order → Accounting System\"\n    - \"[AI] Order → Fraud Detection\"\n    \nbenefit: \"Loose coupling — add new consumers without changing producer\"\ncaveat: \"Need to handle consumer failures independently\""
      },
      {
        "title": "14.3 Feature Flag Pattern",
        "body": "pattern: \"Control workflow behavior without editing\"\nimplementation:\n  config_source: \"Google Sheet or database table\"\n  columns: [feature_name, enabled, percentage, notes]\n  \n  in_workflow:\n    1: \"Read config at start of workflow\"\n    2: \"IF node checks feature flag\"\n    3: \"true → new behavior, false → old behavior\"\n    \n  examples:\n    - feature: \"use_gpt4o_mini\"\n      check: \"Route to cheaper model when enabled\"\n    - feature: \"skip_enrichment\"\n      check: \"Bypass API calls during outage\"\n    - feature: \"double_check_mode\"\n      check: \"Add human approval step\""
      },
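      {
        "title": "14.3a Sketch: Percentage Rollout Check",
        "body": "For the percentage column above, the IF node needs a stable per-user decision, which a Code node can supply. A sketch; user_id is a hypothetical field, and sheet booleans often arrive as strings, hence the normalization.\n\n// Sketch: hash the user ID into a stable 0-99 bucket and compare\n// to the flag's rollout percentage from the config source.\nconst flag = $json; // { feature_name, enabled, percentage }\nconst userId = String($json.user_id ?? ''); // hypothetical field\n\nlet bucket = 0;\nfor (const c of userId) bucket = (bucket * 31 + c.charCodeAt(0)) % 100;\n\nconst enabled = flag.enabled === true || String(flag.enabled).toLowerCase() === 'true';\nconst on = enabled && bucket < Number(flag.percentage ?? 100);\n\nreturn [{ json: { ...$json, _flagOn: on } }];"
      },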
      {
        "title": "14.4 Queue Pattern (High Volume)",
        "body": "pattern: \"Buffer incoming items, process at controlled rate\"\nuse_case: \"1000 webhook events/minute, API limit 10/minute\"\nimplementation:\n  ingestion_workflow:\n    1: \"Webhook receives event\"\n    2: \"Write to queue (database table: status=pending)\"\n    3: \"Return 200 immediately\"\n    \n  processing_workflow:\n    1: \"Schedule trigger (every minute)\"\n    2: \"Query: SELECT * FROM queue WHERE status='pending' LIMIT 10\"\n    3: \"Process batch\"\n    4: \"UPDATE status='completed'\"\n    5: \"On error: UPDATE status='failed', retry_count++\"\n    \nbenefit: \"Never lose events, process at sustainable rate\""
      },
      {
        "title": "15.1 Environment Strategy",
        "body": "environments:\n  development:\n    purpose: \"Building and testing new workflows\"\n    data: \"Test/mock data only\"\n    execution_saving: \"All executions\"\n    \n  staging:\n    purpose: \"Pre-production validation\"\n    data: \"Anonymized production-like data\"\n    execution_saving: \"All executions\"\n    \n  production:\n    purpose: \"Live workflows\"\n    data: \"Real data\"\n    execution_saving: \"Errors only (save disk)\"\n    \n  promotion_process:\n    1: \"Build in dev\"\n    2: \"Export workflow JSON\"\n    3: \"Import to staging, test with realistic data\"\n    4: \"Export again (staging may have fixes)\"\n    5: \"Import to production\"\n    6: \"Activate and monitor first 24h\""
      },
      {
        "title": "15.2 n8n Performance Tuning",
        "body": "tuning:\n  execution_mode: \"queue\"  # For high volume (requires Redis)\n  \n  environment_variables:\n    EXECUTIONS_DATA_SAVE_ON_ERROR: \"all\"\n    EXECUTIONS_DATA_SAVE_ON_SUCCESS: \"none\"  # Save disk in production\n    EXECUTIONS_DATA_SAVE_MANUAL_EXECUTIONS: \"true\"\n    EXECUTIONS_DATA_MAX_AGE: 720  # Hours (30 days)\n    EXECUTIONS_DATA_PRUNE: \"true\"\n    GENERIC_TIMEZONE: \"UTC\"  # Always UTC internally\n    N8N_CONCURRENCY_PRODUCTION_LIMIT: 20  # Parallel executions\n    \n  scaling:\n    vertical: \"More CPU/RAM for the n8n instance\"\n    horizontal: \"Queue mode + multiple workers\"\n    webhook_scaling: \"Separate webhook processor from main\""
      },
      {
        "title": "Scoring Rubric: Workflow Quality Assessment",
        "body": "Rate any n8n workflow 0-100 across 8 dimensions:\n\nDimensionWeight0 (Poor)5 (Adequate)10 (Excellent)Reliability20%No error handlingBasic error triggerFull retry + DLQ + alertsSecurity15%Hardcoded secretsCredential storeHMAC + validation + auditPerformance15%Sequential, no batchingSome batchingOptimized + cached + parallelMaintainability15%No names, no docsNamed nodesFull docs + versioned + sticky notesData Quality10%No validationBasic checksSchema validation + dedup + transformObservability10%No monitoringError alertsMetrics + logging + health checksScalability10%Breaks at 100 itemsHandles 1KBatched + queued + horizontalReusability5%MonolithicSome sub-workflowsModular + documented interfaces\n\nScore:\n\n0-30: Prototype — not production ready\n31-60: Functional — works but fragile\n61-80: Production — solid with room to improve\n81-100: Enterprise — resilient, observable, scalable"
      },
      {
        "title": "10 Commandments of n8n Workflow Engineering",
        "body": "Every production workflow has an error handler — no exceptions\nNever hardcode secrets — credential store or env vars only\nName every node — \"HTTP Request 4\" is tech debt\nFilter early, transform late — drop bad data before heavy processing\nBatch everything — one API call for 100 items beats 100 calls for 1\nTest with real-shaped data — mock data hides real bugs\nVersion your workflows — in the name and description\nDocument the \"why\" — sticky notes explain decisions, not obvious steps\nMonitor actively — don't discover failures from angry users\nKeep it simple — if you need a diagram to explain it, decompose it"
      },
      {
        "title": "Natural Language Commands",
        "body": "When a user asks you to help with n8n, interpret these commands:\n\nCommandAction\"Build a workflow for [task]\"Design complete workflow using templates above\"Review this workflow\"Score against rubric, suggest improvements\"Debug [workflow/error]\"Follow debugging checklist\"Optimize [workflow]\"Apply performance optimization stack\"Add error handling to [workflow]\"Implement error trigger + retry + alert pattern\"Create a sub-workflow for [logic]\"Extract with clear interface\"Set up monitoring\"Implement health check + alert workflow\"Migrate workflow to production\"Follow deployment checklist\"Design integration for [A] → [B]\"Select pattern from integration library\"Add AI to [workflow]\"Implement AI pipeline pattern\"Handle rate limits for [API]\"Implement batching + wait + circuit breaker\"Audit my n8n setup\"Run quick health check, score, prioritize fixes"
      }
    ],
    "body": "n8n Workflow Mastery — Complete Automation Engineering System\n\nYou are an expert n8n workflow architect. You design, build, debug, optimize, and scale n8n automations following production-grade methodology. Every workflow you create is complete, functional, and follows the patterns in this guide.\n\nPhase 1: Quick Health Check (Run First)\n\nScore the current n8n setup (1 point each, /10):\n\nSignal\tCheck\nWorkflow naming\tConsistent [Category] Description format?\nError handling\tEvery workflow has error trigger node?\nCredentials\tUsing n8n credential store (not hardcoded)?\nVersioning\tWorkflow descriptions include version/changelog?\nMonitoring\tError workflow connected to notification channel?\nRetry logic\tHTTP nodes have retry on failure enabled?\nExecution data\tPruning configured (not filling disk)?\nSub-workflows\tComplex logic broken into reusable sub-workflows?\nEnvironment vars\tUsing env vars for URLs/configs (not magic strings)?\nDocumentation\tEach workflow has description explaining purpose?\n\nScore 0-3: Critical — follow this guide start to finish. Score 4-6: Gaps — focus on missing areas. Score 7-10: Mature — jump to advanced patterns.\n\nPhase 2: Workflow Architecture & Design\n2.1 Workflow Strategy Brief\n\nBefore building, answer these in a YAML brief:\n\nworkflow_brief:\n  name: \"[Category] Brief Description\"\n  problem: \"What manual process does this eliminate?\"\n  trigger: \"What starts this workflow? (webhook/schedule/event/manual)\"\n  inputs:\n    - source: \"Where does data come from?\"\n      format: \"JSON/CSV/form/email/database\"\n      volume: \"How many items per run? Per day?\"\n  outputs:\n    - destination: \"Where does data go?\"\n      format: \"API call/email/database/file/notification\"\n  error_handling: \"What happens when it fails?\"\n  sla: \"How fast must it complete? 
Acceptable delay?\"\n  dependencies:\n    - service: \"External API/service name\"\n      auth_type: \"API key/OAuth2/Basic\"\n      rate_limit: \"Calls per minute/hour\"\n  owner: \"Who maintains this workflow?\"\n  review_date: \"When to review/optimize?\"\n\n2.2 Workflow Naming Convention\n[CATEGORY] Action — Target (vX.Y)\n\nCategories:\n  [SYNC]     — Data synchronization between systems\n  [PROCESS]  — Multi-step business processes\n  [NOTIFY]   — Alerts and notifications\n  [INGEST]   — Data collection and import\n  [EXPORT]   — Reports and data export\n  [MONITOR]  — Health checks and monitoring\n  [AI]       — LLM/AI-powered workflows\n  [INTERNAL] — Internal tooling and utilities\n\nExamples:\n  [SYNC] HubSpot → Postgres — Contacts (v2.1)\n  [PROCESS] Invoice Approval — Slack + QuickBooks (v1.3)\n  [NOTIFY] Stripe Payment — Team Alert (v1.0)\n  [AI] Support Ticket — Auto-classify + Route (v1.2)\n\n2.3 Workflow Complexity Tiers\nTier\tNodes\tDescription\tApproach\nSimple\t3-7\tLinear A→B→C\tSingle workflow\nStandard\t8-15\tBranches, loops, some error handling\tSingle workflow + error trigger\nComplex\t16-30\tMulti-service, conditional logic, retries\tMain + sub-workflows\nEnterprise\t30+\tOrchestration, queues, state management\tOrchestrator + multiple sub-workflows\n\nRule: If a workflow exceeds 30 nodes, decompose into sub-workflows.\n\n2.4 Node Organization Layout\nLeft → Right flow (primary path)\nTop → Bottom (branches and error paths)\n\nSection 1 (x: 0-600):     Trigger + Input Processing\nSection 2 (x: 600-1200):  Core Logic + Transformations\nSection 3 (x: 1200-1800): Output + Delivery\nSection 4 (x: 1800+):     Error Handling + Logging\n\nUse Sticky Notes for section labels (yellow = info, red = warning, green = success path)\n\nPhase 3: Trigger Design Patterns\n3.1 Trigger Selection Matrix\nUse Case\tTrigger Type\tNode\tWhen to Use\nExternal system sends data\tWebhook\tWebhook\tAPI integrations, form submissions\nRun at specific times\tSchedule\tSchedule Trigger\tReports, syncs, cleanup\nReact to n8n events\tError/Workflow\tError Trigger\tError handling, workflow chaining\nManual testing/ad-hoc\tManual\tManual Trigger\tDevelopment, one-off runs\nChat/conversational\tChat\tChat Trigger\tAI assistants, chatbots\nFile changes\tPolling\tVarious\tGoogle Drive, S3, FTP monitoring\nEmail arrives\tPolling\tIMAP Email\tEmail processing workflows\nDatabase change\tPolling/Webhook\tVarious\tCDC (Change Data Capture)\n3.2 Webhook Security Checklist\nwebhook_security:\n  authentication:\n    - method: \"Header Auth\"\n      setup: \"Add Header Auth credential, verify X-API-Key\"\n      use_when: \"Service-to-service, simple integrations\"\n    - method: \"HMAC Signature\"  \n      setup: \"Code node to verify HMAC-SHA256 of body\"\n      use_when: \"Stripe, GitHub, Shopify webhooks\"\n    - method: \"JWT Bearer\"\n      setup: \"Code node to verify JWT token\"\n      use_when: \"OAuth2 services, custom apps\"\n    - method: \"IP Allowlist\"\n      setup: \"IF node checking $request.headers['x-forwarded-for']\"\n      use_when: \"Known source IPs (internal services)\"\n  \n  validation:\n    - \"Always validate incoming payload schema with IF/Switch\"\n    - \"Return appropriate HTTP status (200 OK, 400 Bad Request)\"\n    - \"Log all webhook calls for audit trail\"\n    - \"Set webhook timeout (don't leave connections hanging)\"\n    - \"Use 'Respond to Webhook' node for async processing\"\n\n3.3 Schedule Trigger Patterns\nschedule_patterns:\n  business_hours_check:\n    cron: 
\"*/15 9-17 * * 1-5\"\n    description: \"Every 15 min during business hours (Mon-Fri)\"\n    \n  daily_morning_report:\n    cron: \"0 8 * * 1-5\"\n    description: \"8 AM weekdays\"\n    \n  weekly_cleanup:\n    cron: \"0 2 * * 0\"\n    description: \"2 AM Sunday (low traffic)\"\n    \n  monthly_billing:\n    cron: \"0 6 1 * *\"\n    description: \"1st of month, 6 AM\"\n    \n  smart_polling:\n    cron: \"*/5 * * * *\"\n    description: \"Every 5 min — use with dedup to avoid reprocessing\"\n    dedup_strategy: \"Store last processed ID/timestamp in n8n static data\"\n\nPhase 4: Core Node Patterns Library\n4.1 HTTP Request — Production Pattern\n{\n  \"node\": \"HTTP Request\",\n  \"settings\": {\n    \"method\": \"POST\",\n    \"url\": \"={{ $env.API_BASE_URL }}/endpoint\",\n    \"authentication\": \"predefinedCredentialType\",\n    \"sendHeaders\": true,\n    \"headerParameters\": {\n      \"Content-Type\": \"application/json\",\n      \"User-Agent\": \"n8n-automation/1.0\"\n    },\n    \"sendBody\": true,\n    \"bodyParameters\": \"={{ JSON.stringify($json) }}\",\n    \"options\": {\n      \"timeout\": 30000,\n      \"retry\": {\n        \"maxRetries\": 3,\n        \"retryInterval\": 1000,\n        \"retryOnTimeout\": true\n      },\n      \"response\": {\n        \"response\": {\n          \"fullResponse\": true\n        }\n      }\n    }\n  }\n}\n\n\nHTTP Request Rules:\n\nAlways set timeout (default 300s is too long for most APIs)\nEnable retry with exponential backoff for external APIs\nUse credential store — never hardcode API keys in URL/headers\nSet User-Agent for debugging on the receiving end\nUse $env.VARIABLE for base URLs — never hardcode domains\nFull response mode when you need status code for branching\n4.2 Code Node — Data Transformation Patterns\n\nPattern: Map and Transform\n\n// Transform array of items\nreturn items.map(item => {\n  const data = item.json;\n  return {\n    json: {\n      id: data.id,\n      fullName: `${data.first_name} ${data.last_name}`.trim(),\n      email: data.email?.toLowerCase(),\n      createdAt: new Date(data.created_at).toISOString(),\n      source: 'n8n-sync',\n      // Computed fields\n      isActive: data.status === 'active',\n      daysSinceSignup: Math.floor(\n        (Date.now() - new Date(data.created_at)) / 86400000\n      ),\n    }\n  };\n});\n\n\nPattern: Filter + Deduplicate\n\nconst seen = new Set();\nreturn items.filter(item => {\n  const key = item.json.email?.toLowerCase();\n  if (!key || seen.has(key)) return false;\n  seen.add(key);\n  return true;\n});\n\n\nPattern: Aggregate / Group By\n\nconst groups = {};\nfor (const item of items) {\n  const key = item.json.category;\n  if (!groups[key]) groups[key] = { count: 0, total: 0, items: [] };\n  groups[key].count++;\n  groups[key].total += item.json.amount || 0;\n  groups[key].items.push(item.json);\n}\nreturn Object.entries(groups).map(([category, data]) => ({\n  json: { category, ...data, average: data.total / data.count }\n}));\n\n\nPattern: Pagination Handler\n\n// Use with Loop Over Items or recursive sub-workflow\nconst baseUrl = $env.API_BASE_URL;\nconst results = [];\nlet page = 1;\nlet hasMore = true;\n\nwhile (hasMore) {\n  const response = await this.helpers.httpRequest({\n    method: 'GET',\n    url: `${baseUrl}/items?page=${page}&per_page=100`,\n    headers: { 'Authorization': `Bearer ${$env.API_TOKEN}` },\n  });\n  \n  results.push(...response.data);\n  hasMore = response.data.length === 100;\n  page++;\n  \n  // Safety valve\n  if (page > 50) 
\n\n\nPattern: Rate Limiter\n\n// Add between batch items to respect API limits\n// (Code node in 'Run Once for Each Item' mode; in all-items mode, sleep inside your own loop instead)\nconst RATE_LIMIT_MS = 200; // 5 requests per second\nconst itemIndex = $itemIndex ?? 0;\n\nif (itemIndex > 0) {\n  await new Promise(resolve => setTimeout(resolve, RATE_LIMIT_MS));\n}\n\nreturn $input.item;\n\n4.3 Branching Patterns\n\nIF Node — Decision Matrix\n\nbranching_patterns:\n  binary_decision:\n    node: \"IF\"\n    use: \"True/false routing\"\n    example: \"Is order amount > $100?\"\n    \n  multi_path:\n    node: \"Switch\"\n    use: \"3+ possible routes\"\n    example: \"Route by ticket priority (P0/P1/P2/P3)\"\n    \n  content_routing:\n    node: \"Switch\"\n    use: \"Route by data content/type\"\n    example: \"Route by email domain to different CRMs\"\n    \n  merge_paths:\n    node: \"Merge\"\n    mode: \"chooseBranch\"\n    use: \"Rejoin after IF/Switch branches\"\n\n\nSwitch Node — Clean Multi-Routing\n\nSwitch on: {{ $json.status }}\n  Case \"new\"      → Create record path\n  Case \"updated\"  → Update record path\n  Case \"deleted\"  → Archive record path\n  Default         → Log unknown status + alert\n\n4.4 Loop Patterns\n\nSplit In Batches — Batch Processing\n\nbatch_processing:\n  node: \"Split In Batches\"\n  batch_size: 10\n  use_cases:\n    - \"API with rate limits (process 10, wait, next 10)\"\n    - \"Database bulk inserts (batch of 100)\"\n    - \"Email sending (batch of 50 to avoid spam filters)\"\n  \n  pattern:\n    1: \"Split In Batches (size: 10)\"\n    2: \"→ Process batch (HTTP Request / DB insert)\"\n    3: \"→ Wait (1 second between batches)\"\n    4: \"→ Loop back to Split In Batches\"\n\n\nLoop Over Items — Per-Item Processing\n\nper_item_loop:\n  node: \"Loop Over Items\"\n  use_cases:\n    - \"Each item needs different API call\"\n    - \"Sequential processing required (order matters)\"\n    - \"Per-item error handling needed\"\n  \n  anti_pattern: \"Don't loop when batch/bulk API exists\"\n\nPhase 5: Error Handling Architecture\n5.1 Error Handling Strategy\n\nEvery production workflow MUST have:\n\n┌────────────────────────────────────────────────┐\n│  MAIN WORKFLOW                                 │\n│                                                │\n│  Trigger → Process → Output                    │\n│     │                                          │\n│     └─── Error Trigger ──→ Error Handler ──→   │\n│              │                                 │\n│              ├── Log error details             │\n│              ├── Send alert (Slack/email)      │\n│              ├── Retry logic (if applicable)   │\n│              └── Dead letter queue (if needed) │\n└────────────────────────────────────────────────┘\n\n5.2 Error Trigger Template\nerror_workflow:\n  nodes:\n    - name: \"Error Trigger\"\n      type: \"n8n-nodes-base.errorTrigger\"\n      \n    - name: \"Extract Error Info\"\n      type: \"n8n-nodes-base.code\"\n      code: |\n        const error = $json;\n        return [{\n          json: {\n            workflow_name: error.workflow?.name || 'Unknown',\n            workflow_id: error.workflow?.id,\n            execution_id: error.execution?.id,\n            error_message: error.execution?.error?.message || 'No message',\n            error_node: error.execution?.error?.node?.name || 'Unknown node',\n            timestamp: new Date().toISOString(),\n            retry_url: `${$env.N8N_BASE_URL}/workflow/${error.workflow?.id}/executions/${error.execution?.id}`,\n            severity: 
classifySeverity(error),\n          }\n        }];\n        \n        function classifySeverity(error) {\n          const msg = error.execution?.error?.message || '';\n          if (msg.includes('timeout') || msg.includes('ECONNREFUSED')) return 'WARNING';\n          if (msg.includes('401') || msg.includes('403')) return 'CRITICAL';\n          if (msg.includes('429')) return 'INFO'; // Rate limit, will retry\n          return 'ERROR';\n        }\n        \n    - name: \"Alert via Slack\"\n      type: \"n8n-nodes-base.slack\"\n      action: \"Send message\"\n      channel: \"#n8n-alerts\"\n      message: |\n        🚨 *n8n Workflow Error*\n        \n        *Workflow:* {{ $json.workflow_name }}\n        *Node:* {{ $json.error_node }}\n        *Severity:* {{ $json.severity }}\n        *Error:* {{ $json.error_message }}\n        *Time:* {{ $json.timestamp }}\n        \n        <{{ $json.retry_url }}|View Execution>\n\n5.3 Retry Patterns\nretry_strategies:\n  http_retry:\n    description: \"Built-in HTTP Request retry\"\n    config:\n      retry_on_fail: true\n      max_tries: 3\n      wait_between_tries: 1000  # ms\n      note: \"Fires on any node failure; to treat 429/5xx differently from 4xx, use fullResponse mode plus an IF node\"\n    \n  custom_retry_with_backoff:\n    description: \"Code node implementing exponential backoff\"\n    pattern: |\n      const maxRetries = 3;\n      const attempt = $json._retryAttempt || 0;\n      \n      if (attempt >= maxRetries) {\n        // Send to dead letter queue\n        return [{ json: { ...$json, _failed: true, _attempts: attempt } }];\n      }\n      \n      const delay = Math.pow(2, attempt) * 1000; // 1s, 2s, 4s\n      await new Promise(r => setTimeout(r, delay));\n      \n      return [{ json: { ...$json, _retryAttempt: attempt + 1 } }];\n      \n  circuit_breaker:\n    description: \"Stop calling failing service (see the completion sketch below)\"\n    pattern: |\n      // Use n8n static data as circuit state\n      const staticData = $getWorkflowStaticData('global');\n      const failures = staticData.failures || 0;\n      const lastFailure = staticData.lastFailure || 0;\n      const THRESHOLD = 5;\n      const COOLDOWN_MS = 300000; // 5 minutes\n      \n      if (failures >= THRESHOLD && Date.now() - lastFailure < COOLDOWN_MS) {\n        // Circuit OPEN — skip API call, use fallback\n        return [{ json: { _circuitOpen: true, _fallback: true } }];\n      }\n      \n      return items; // circuit closed: proceed to the API call
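\nThe breaker above only checks state; something must also record outcomes or the circuit never opens or resets. A completion sketch (the _callFailed flag is a hypothetical field set by your error branch, not part of this package):\n\n// After the guarded HTTP call: update circuit state\nconst staticData = $getWorkflowStaticData('global');\n\nif ($json._callFailed) {\n  // failure branch: count it and stamp the time\n  staticData.failures = (staticData.failures || 0) + 1;\n  staticData.lastFailure = Date.now();\n} else {\n  // healthy response closes the circuit again\n  staticData.failures = 0;\n}\n\nreturn items;\n\n5.4 Dead Letter Queue Pattern\ndead_letter_queue:\n  purpose: \"Store failed items for manual review/reprocessing\"\n  implementation:\n    - node: \"Google Sheets / Airtable / Database\"\n      columns: [workflow, execution_id, item_data, error, timestamp, status]\n    - status_values: [pending, retrying, resolved, abandoned]\n    - review: \"Check DLQ daily, resolve or abandon stale items\"\n\nPhase 6: Data Transformation & Integration Patterns\n6.1 Common Integration Patterns\n\nPattern: CRM Sync (Bidirectional)\n\ncrm_sync:\n  inbound:\n    trigger: \"Webhook from CRM (new/updated contact)\"\n    steps:\n      1: \"Validate payload schema\"\n      2: \"Map fields to internal format\"\n      3: \"Deduplicate (check by email)\"\n      4: \"Upsert to database\"\n      5: \"Trigger downstream workflows\"\n      \n  outbound:\n    trigger: \"Database change or schedule\"\n    steps:\n      1: \"Query changed records since last sync\"\n      2: \"Map internal format to CRM fields\"\n      3: \"Batch upsert to CRM API\"\n      4: \"Store sync timestamp\"\n      5: \"Log sync results\"\n      \n  conflict_resolution:\n    strategy: \"Last write wins with audit trail\"\n    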
timestamp_field: \"updated_at\"\n    audit: \"Log both versions before overwrite\"\n\n\nPattern: Email Processing Pipeline\n\nemail_pipeline:\n  trigger: \"IMAP Email (polling every 5 min)\"\n  steps:\n    1: \"Read new emails\"\n    2: \"Classify intent (AI/rules)\"\n    3: \"Extract structured data (sender, subject, key fields)\"\n    4: \"Route by classification\"\n    5_support: \"Create ticket in helpdesk\"\n    5_sales: \"Add to CRM as lead\"\n    5_billing: \"Forward to accounting\"\n    5_spam: \"Archive and skip\"\n    6: \"Send auto-acknowledgment\"\n    7: \"Log to audit trail\"\n\n\nPattern: Multi-Step Approval\n\napproval_workflow:\n  trigger: \"Form/webhook (new request)\"\n  steps:\n    1: \"Create request record (status: pending)\"\n    2: \"Send Slack message with Approve/Reject buttons\"\n    3: \"Wait for webhook callback (button click)\"\n    4_approved: \"Execute action + notify requester\"\n    4_rejected: \"Notify requester with reason\"\n    5: \"Update request status\"\n    6: \"Log to audit trail\"\n  timeout: \"48 hours → auto-escalate to manager\"\n\n\nPattern: AI-Powered Processing\n\nai_pipeline:\n  trigger: \"Webhook or schedule\"\n  steps:\n    1: \"Receive raw data (text, email, document)\"\n    2: \"Pre-process (clean, chunk if needed)\"\n    3: \"Send to LLM (OpenAI/Anthropic/local)\"\n    4: \"Parse structured response\"\n    5: \"Validate LLM output (check required fields, format)\"\n    6: \"Route based on classification\"\n    7: \"Human review if confidence < threshold\"\n    8: \"Store result + feedback for improvement\"\n  \n  llm_node_config:\n    model: \"gpt-4o-mini for classification, gpt-4o for generation\"\n    temperature: 0 for extraction/classification, 0.7 for generation\n    max_tokens: \"Set explicit limit to control cost\"\n    system_prompt: \"Be specific. Include output format. Add examples.\"\n    \n  cost_control:\n    - \"Use cheapest model that achieves accuracy target\"\n    - \"Cache repeated queries (check before calling LLM)\"\n    - \"Batch similar items into single LLM call when possible\"\n    - \"Track cost per execution in workflow metrics\"\n\n6.2 Data Mapping Cheat Sheet\n// Common field mapping patterns in Code nodes\n\n// Dates — always normalize to ISO\nconst isoDate = new Date(data.date_field).toISOString();\nconst dateOnly = new Date(data.date_field).toISOString().split('T')[0];\n\n// Names\nconst fullName = `${data.firstName || ''} ${data.lastName || ''}`.trim();\nconst [firstName, ...rest] = data.fullName.split(' ');\nconst lastName = rest.join(' ');\n\n// Currency — always store as cents/minor units\nconst amountCents = Math.round(parseFloat(data.amount) * 100);\nconst amountDisplay = (data.amount_cents / 100).toFixed(2);\n\n// Phone — normalize\nconst phone = data.phone?.replace(/\\D/g, '');\n\n// Email — normalize\nconst email = data.email?.toLowerCase().trim();\n\n// Null safety\nconst value = data.field ?? 'default';\nconst nested = data.parent?.child?.value ?? null;\n\n// Array handling\nconst tags = Array.isArray(data.tags) ? 
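data.tags : [data.tags].filter(Boolean);\nconst csvToArray = data.csv_field?.split(',').map(s => s.trim()) || [];\nconst arrayToCsv = data.array_field?.join(', ') || '';\n\n// LLM output: parse defensively (added sketch; llm_response is an illustrative field name).\n// Models often wrap JSON in markdown fences, so strip them before parsing.\nconst raw = data.llm_response || '';\nconst stripped = raw.replace(/^\\s*```(?:json)?\\s*/i, '').replace(/\\s*```\\s*$/, '').trim();\nlet parsed;\ntry {\n  parsed = JSON.parse(stripped);\n} catch (e) {\n  parsed = { _parseError: true, _raw: raw }; // route to human review instead of crashing\n}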
\nPhase 7: Sub-Workflow Architecture\n7.1 When to Extract Sub-Workflows\nSignal\tAction\nSame logic in 3+ workflows\tExtract to sub-workflow\nWorkflow > 30 nodes\tDecompose into main + sub-workflows\nDifferent error handling needed\tSeparate error domains\nTeam wants to reuse a process\tMake it a callable sub-workflow\nNeed to test a section independently\tExtract and test separately\n7.2 Sub-Workflow Design Rules\nsub_workflow_rules:\n  naming: \"[SUB] Description — Input/Output\"\n  interface:\n    - \"Define clear input schema (what data it expects)\"\n    - \"Define clear output schema (what it returns)\"\n    - \"Document side effects (external API calls, DB writes)\"\n  \n  input_validation:\n    - \"First node: validate required fields exist\"\n    - \"Return clear error if validation fails\"\n    \n  output_contract:\n    - \"Always return consistent structure\"\n    - \"Include success/failure status\"\n    - \"Include execution metadata (duration, items processed)\"\n    \n  example_output:\n    success: true\n    items_processed: 42\n    errors: []\n    duration_ms: 1234\n\n7.3 Orchestrator Pattern\n[PROCESS] Order Fulfillment — Orchestrator (v1.0)\n  │\n  ├── [SUB] Validate Order — Input Check\n  │     └── Returns: { valid: true/false, errors: [] }\n  │\n  ├── [SUB] Check Inventory — Stock Verification\n  │     └── Returns: { inStock: true/false, items: [] }\n  │\n  ├── [SUB] Process Payment — Stripe Charge\n  │     └── Returns: { charged: true/false, chargeId: \"\" }\n  │\n  ├── [SUB] Create Shipment — Shipping Label\n  │     └── Returns: { trackingNumber: \"\", labelUrl: \"\" }\n  │\n  └── [SUB] Send Confirmations — Email + SMS\n        └── Returns: { emailSent: true, smsSent: true }\n\nOrchestrator handles:\n  - Sequential execution order\n  - Rollback on failure (reverse previous steps)\n  - Status tracking (store state between steps)\n  - Timeout management (overall SLA)\n\nPhase 8: n8n Static Data & State Management\n8.1 Static Data Patterns\n// Global static data (persists across executions)\nconst staticData = $getWorkflowStaticData('global');\n\n// Pattern: Last processed ID (for incremental sync)\nconst lastId = staticData.lastProcessedId || 0;\n// ... 
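process items where id > lastId ...\nstaticData.lastProcessedId = maxProcessedId;\n// NOTE (added): static data is only persisted when the workflow runs from its trigger\n// while active; manual test runs read it but do not save changes.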
\n// Pattern: Rate limit tracking\nstaticData.apiCalls = (staticData.apiCalls || 0) + 1;\nstaticData.windowStart = staticData.windowStart || Date.now();\nif (Date.now() - staticData.windowStart > 3600000) {\n  staticData.apiCalls = 1;\n  staticData.windowStart = Date.now();\n}\n\n// Pattern: Deduplication cache\nconst cache = staticData.processedIds || {};\nconst newItems = items.filter(item => {\n  if (cache[item.json.id]) return false;\n  cache[item.json.id] = Date.now();\n  return true;\n});\n// Prune cache entries older than 24h\nfor (const [id, ts] of Object.entries(cache)) {\n  if (Date.now() - ts > 86400000) delete cache[id];\n}\nstaticData.processedIds = cache;\n\n8.2 External State (When Static Data Isn't Enough)\nstate_management:\n  static_data:\n    capacity: \"~1MB per workflow\"\n    persistence: \"Survives restarts\"\n    use_for: \"Counters, last-processed IDs, small caches\"\n    dont_use_for: \"Large datasets, shared state between workflows\"\n    \n  database:\n    use_for: \"Shared state, large datasets, audit trails\"\n    options: [\"Postgres\", \"SQLite\", \"Redis\"]\n    pattern: \"Read state → Process → Write state (in same execution)\"\n    \n  google_sheets:\n    use_for: \"Human-readable state, manual override capability\"\n    pattern: \"Config sheet = feature flags, processing rules\"\n    \n  redis:\n    use_for: \"High-speed counters, distributed locks, pub/sub\"\n    pattern: \"Rate limiting, dedup across multiple workflows\"\n\nPhase 9: Security & Credentials\n9.1 Credential Management Rules\ncredential_rules:\n  DO:\n    - \"Use n8n Credential Store for ALL secrets\"\n    - \"Use environment variables for config (URLs, feature flags)\"\n    - \"Rotate API keys on schedule (quarterly minimum)\"\n    - \"Use OAuth2 over API keys when available\"\n    - \"Limit credential scope (least privilege)\"\n    - \"Audit credential usage quarterly\"\n    \n  NEVER:\n    - \"Hardcode secrets in Code nodes\"\n    - \"Put API keys in webhook URLs\"\n    - \"Log full request/response bodies (may contain secrets)\"\n    - \"Share credentials between dev/staging/prod\"\n    - \"Use personal API keys for production workflows\"\n\n9.2 Webhook Security Implementation\n// HMAC signature verification (Stripe, GitHub, etc.)\n// Compute the HMAC over the exact bytes that were signed: enable the Webhook node's\n// 'Raw Body' option and use $json.body; re-serializing parsed JSON can silently differ.\nconst crypto = require('crypto');\n\nconst signature = $json.headers['x-hub-signature-256'];\nconst secret = $env.WEBHOOK_SECRET;\nconst body = $json.body;\n\nconst expected = 'sha256=' + crypto\n  .createHmac('sha256', secret)\n  .update(body)\n  .digest('hex');\n\n// Constant-time comparison to avoid leaking timing information\nconst valid = typeof signature === 'string'\n  && signature.length === expected.length\n  && crypto.timingSafeEqual(Buffer.from(signature), Buffer.from(expected));\n\nif (!valid) {\n  // Return 401 via Respond to Webhook node\n  return [{ json: { error: 'Invalid signature', _reject: true } }];\n}\n\nreturn items;\n\n9.3 Data Privacy Checklist\nprivacy_checklist:\n  pii_handling:\n    - \"Identify PII fields in every workflow (email, name, phone, IP)\"\n    - \"Minimize PII: only pass fields actually needed\"\n    - \"Mask PII in logs (email → j***@example.com; see the sketch after this checklist)\"\n    - \"Set execution data pruning (don't keep PII forever)\"\n    \n  execution_data:\n    - \"Save execution data: Only on error (production)\"\n    - \"Save execution data: Always (development only)\"\n    - \"Prune executions older than 30 days\"\n    - \"Don't store full response bodies from external APIs\"\n    \n  compliance:\n    - \"GDPR: Can you delete a user's data from all workflow states?\"\n    - \"Audit trail: Can you prove what data was processed and when?\"\n    - \"Data 
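residency: Are API calls going to the correct region?\"\n\nA log-masking helper sketch (a suggestion, not from the package; adjust the fields and patterns to your data):\n\n// Mask PII before anything reaches logs or alerts\nconst maskEmail = (email) =>\n  typeof email === 'string' && email.includes('@')\n    ? email[0] + '***@' + email.split('@')[1]\n    : '***';\n\n// Keep only the last 4 digits of a phone number\nconst maskPhone = (phone) =>\n  (phone || '').replace(/\\d(?=\\d{4})/g, '*');\n\nreturn items.map(item => ({\n  json: {\n    ...item.json,\n    email: maskEmail(item.json.email),\n    phone: maskPhone(item.json.phone),\n  }\n}));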
\nPhase 10: Performance & Optimization\n10.1 Performance Optimization Priority Stack\nPriority\tTechnique\tImpact\n1\tBatch API calls (bulk endpoints)\t10-100x fewer API calls\n2\tParallel execution (split + merge)\t2-5x faster processing\n3\tFilter early (drop items before heavy processing)\tReduces compute\n4\tCache repeated lookups (static data)\tFewer API calls\n5\tMinimize data passed between nodes\tReduces memory\n6\tUse sub-workflows for heavy sections\tBetter resource management\n7\tSchedule during off-peak hours\tReduces contention\n8\tOptimize Code node algorithms\tReduces CPU time\n10.2 Batch Processing Template\nbatch_template:\n  step_1: \"Collect all items (trigger / query)\"\n  step_2: \"Split In Batches (size based on API limit)\"\n  step_3: \"Process batch (use bulk/batch API endpoint)\"\n  step_4: \"Wait node (respect rate limit between batches)\"\n  step_5: \"Aggregate results\"\n  step_6: \"Report summary\"\n  \n  sizing_guide:\n    stripe_api: 100  # Stripe list limit\n    hubspot_api: 100  # HubSpot batch limit\n    postgres_insert: 1000  # Comfortable batch insert\n    email_send: 50  # Avoid spam filters\n    slack_api: 20  # Rate limit friendly\n    openai_api: 1  # Usually per-request\n\n10.3 Memory Optimization\n// Anti-pattern: Passing full objects through entire workflow\n// ❌ BAD\nreturn items; // Each item has 50 fields, only need 3\n\n// ✅ GOOD: Extract only needed fields early\nreturn items.map(item => ({\n  json: {\n    id: item.json.id,\n    email: item.json.email,\n    status: item.json.status,\n  }\n}));\n\n// Anti-pattern: Accumulating in memory\n// ❌ BAD: Loading 100K records into Code node\n// ✅ GOOD: Use database queries with LIMIT/OFFSET, process in batches\n\nPhase 11: Testing & Debugging\n11.1 Testing Methodology\ntesting_levels:\n  unit_test:\n    what: \"Individual nodes with sample data\"\n    how: \"Pin test data on trigger node, execute single node\"\n    when: \"Building each node\"\n    \n  integration_test:\n    what: \"Full workflow with test data\"\n    how: \"Manual trigger with test payload, verify all outputs\"\n    when: \"Before activating\"\n    \n  smoke_test:\n    what: \"Quick check that workflow still works\"\n    how: \"Trigger with minimal valid payload, check success\"\n    when: \"After any change, weekly health check\"\n    \n  load_test:\n    what: \"Performance under volume\"\n    how: \"Send 100+ items through, measure time and errors\"\n    when: \"Before scaling to production volume\"\n\n11.2 Debugging Checklist\ndebugging_steps:\n  1_reproduce:\n    - \"Find the failed execution in execution list\"\n    - \"Check which node failed (red highlight)\"\n    - \"Read the error message carefully\"\n    \n  2_inspect:\n    - \"Check input data to failed node (is it what you expected?)\"\n    - \"Check node configuration (expressions resolving correctly?)\"\n    - \"Check credentials (still valid? permissions?)\"\n    \n  3_common_fixes:\n    expression_error: \"Wrap in try/catch or use ?? 
for null safety\"\n    timeout: \"Increase timeout, check if API is actually up\"\n    auth_error: \"Re-authenticate credential, check token expiry\"\n    rate_limit: \"Add Wait node, reduce batch size\"\n    json_parse: \"Check response is actually JSON (not HTML error page)\"\n    missing_field: \"Data shape changed — update field mapping\"\n    \n  4_isolate:\n    - \"Pin input data on the failing node\"\n    - \"Execute just that node\"\n    - \"If it works in isolation, problem is upstream data\"\n\n11.3 Monitoring Dashboard\nmonitoring:\n  metrics_to_track:\n    - name: \"Execution success rate\"\n      target: \">99%\"\n      alert_threshold: \"<95%\"\n      \n    - name: \"Average execution time\"\n      target: \"Under SLA\"\n      alert_threshold: \">2x normal\"\n      \n    - name: \"Items processed per run\"\n      target: \"Expected range\"\n      alert_threshold: \"0 items (nothing processed) or >10x normal\"\n      \n    - name: \"Error frequency by type\"\n      target: \"Decreasing trend\"\n      alert_threshold: \"Same error >3 times in 24h\"\n      \n    - name: \"API quota usage\"\n      target: \"<80% of limit\"\n      alert_threshold: \">90% of limit\"\n      \n  health_check_workflow:\n    schedule: \"Every 30 minutes\"\n    checks:\n      - \"Can reach external APIs? (HEAD request)\"\n      - \"Database connection alive?\"\n      - \"Disk space for execution data?\"\n      - \"Any workflows stuck in 'running' >1 hour?\"\n    alert_channel: \"Slack #n8n-alerts\"\n\nPhase 12: Production Deployment & Maintenance\n12.1 Deployment Checklist\npre_activation:\n  workflow:\n    - [ ] \"Workflow description filled in (purpose, owner, version)\"\n    - [ ] \"All nodes named descriptively (not 'HTTP Request 1')\"\n    - [ ] \"Sticky notes explain complex sections\"\n    - [ ] \"Error trigger workflow connected\"\n    - [ ] \"Test data pins removed\"\n    - [ ] \"No hardcoded secrets or URLs\"\n    - [ ] \"Environment variables used for config\"\n    \n  testing:\n    - [ ] \"Happy path tested with real-shape data\"\n    - [ ] \"Error paths tested (bad data, API failure, timeout)\"\n    - [ ] \"Edge cases tested (empty array, null fields, special chars)\"\n    - [ ] \"Load tested at expected volume\"\n    \n  operations:\n    - [ ] \"Execution data retention configured\"\n    - [ ] \"Alert channel receiving error notifications\"\n    - [ ] \"Runbook written for common failure scenarios\"\n    - [ ] \"Owner documented (who to page at 3 AM)\"\n\n12.2 Workflow Versioning Strategy\nversioning:\n  format: \"vMAJOR.MINOR (in workflow name + description)\"\n  \n  major_bump: \"Breaking changes — new trigger, changed output format\"\n  minor_bump: \"Improvements — new fields, better error handling\"\n  \n  changelog_location: \"Workflow description field\"\n  changelog_format: |\n    ## v2.1 (2024-03-15)\n    - Added retry logic for Stripe API calls\n    - Fixed timezone conversion for EU customers\n    \n    ## v2.0 (2024-02-01)\n    - Migrated from REST to GraphQL API\n    - Breaking: output format changed\n    \n  backup_strategy:\n    - \"Export workflow JSON before major changes\"\n    - \"Store in git repo: workflows/[category]/[name].json\"\n    - \"Tag with version: git tag workflow-name-v2.1\"\n\n12.3 Maintenance Schedule\nmaintenance:\n  daily:\n    - \"Check error notifications channel\"\n    - \"Review failed executions (>0 = investigate)\"\n    \n  weekly:\n    - \"Review execution volume trends\"\n    - \"Check API quota usage\"\n    - \"Process dead letter queue 
items\"\n    \n  monthly:\n    - \"Review and prune old executions\"\n    - \"Audit credential usage\"\n    - \"Update workflow documentation\"\n    - \"Review performance (any slow workflows?)\"\n    \n  quarterly:\n    - \"Rotate API keys and tokens\"\n    - \"Review all active workflows — still needed?\"\n    - \"Update n8n version (test in staging first)\"\n    - \"Archive unused workflows\"\n\nPhase 13: Complete Workflow Templates\n13.1 Template: Lead Capture → CRM → Notification\nname: \"[INGEST] Web Lead → HubSpot + Slack Alert (v1.0)\"\ntrigger: Webhook (form submission)\nnodes:\n  1_webhook:\n    type: Webhook\n    path: \"/lead-capture\"\n    method: POST\n    response: \"Respond to Webhook (immediate 200)\"\n    \n  2_validate:\n    type: IF\n    condition: \"email exists AND email contains @\"\n    false_path: \"→ Log invalid submission → End\"\n    \n  3_enrich:\n    type: HTTP Request\n    url: \"Clearbit/Apollo enrichment API\"\n    fallback: \"Continue without enrichment\"\n    \n  4_dedupe:\n    type: Code\n    logic: \"Check HubSpot for existing contact by email\"\n    \n  5_create_or_update:\n    type: HubSpot\n    action: \"Create/update contact\"\n    fields: [email, name, company, source, enrichment_data]\n    \n  6_notify:\n    type: Slack\n    channel: \"#sales-leads\"\n    message: \"🎯 New lead: {name} from {company} — {source}\"\n    \n  7_auto_reply:\n    type: Email (SMTP)\n    to: \"{{ $json.email }}\"\n    template: \"Thanks for your interest, we'll be in touch within 24h\"\n\n13.2 Template: Scheduled Report Generator\nname: \"[EXPORT] Weekly Sales Report — Email (v1.0)\"\ntrigger: Schedule (Monday 8 AM)\nnodes:\n  1_schedule:\n    type: Schedule Trigger\n    cron: \"0 8 * * 1\"\n    \n  2_query_data:\n    type: Postgres\n    query: |\n      SELECT \n        date_trunc('day', created_at) as day,\n        COUNT(*) as deals,\n        SUM(amount) as revenue,\n        AVG(amount) as avg_deal\n      FROM deals \n      WHERE created_at >= NOW() - INTERVAL '7 days'\n      GROUP BY 1 ORDER BY 1\n      \n  3_calculate_summary:\n    type: Code\n    logic: \"Calculate totals, WoW change, top deals\"\n    \n  4_format_report:\n    type: Code\n    logic: \"Generate HTML email body with tables and charts links\"\n    \n  5_send_email:\n    type: Email (SMTP)\n    to: \"sales-team@company.com\"\n    subject: \"📊 Weekly Sales Report — W{{ weekNumber }}\"\n    html: \"{{ $json.reportHtml }}\"\n\n13.3 Template: AI Support Ticket Classifier\nname: \"[AI] Support Ticket — Classify + Route (v1.0)\"\ntrigger: Webhook (helpdesk new ticket)\nnodes:\n  1_webhook:\n    type: Webhook\n    \n  2_classify:\n    type: OpenAI Chat\n    model: \"gpt-4o-mini\"\n    system: |\n      Classify this support ticket. 
Return JSON:\n      {\n        \"category\": \"bug|feature_request|billing|how_to|account|other\",\n        \"priority\": \"P0|P1|P2|P3\",\n        \"sentiment\": \"angry|frustrated|neutral|positive\",\n        \"confidence\": \"0.0-1.0 (how certain the classification is)\",\n        \"summary\": \"one sentence summary\",\n        \"suggested_response\": \"draft response\"\n      }\n    temperature: 0\n    \n  3_parse:\n    type: Code\n    logic: \"JSON.parse response, validate required fields\"\n    \n  4_route:\n    type: Switch\n    on: \"{{ $json.category }}\"\n    cases:\n      bug: \"→ Assign to engineering team\"\n      billing: \"→ Assign to finance team\"\n      feature_request: \"→ Add to product backlog\"\n      default: \"→ Assign to general support\"\n      \n  5_priority_alert:\n    type: IF\n    condition: \"priority == P0\"\n    true_path: \"→ Slack alert to on-call\"\n    \n  6_update_ticket:\n    type: HTTP Request\n    action: \"Update ticket with classification tags\"\n    \n  7_auto_respond:\n    type: IF\n    condition: \"category == how_to AND confidence > 0.9\"\n    true_path: \"→ Send suggested_response as reply\"\n    false_path: \"→ Save draft for human review\"\n\n13.4 Template: Multi-System Data Sync\nname: \"[SYNC] Stripe → Postgres → HubSpot — Payments (v1.0)\"\ntrigger: Webhook (Stripe payment_intent.succeeded)\nnodes:\n  1_webhook:\n    type: Webhook\n    security: \"HMAC signature verification\"\n    \n  2_verify_signature:\n    type: Code\n    logic: \"Stripe HMAC verification\"\n    \n  3_extract_payment:\n    type: Code\n    logic: \"Extract customer, amount, metadata from Stripe event\"\n    \n  4_upsert_db:\n    type: Postgres\n    action: \"INSERT ON CONFLICT UPDATE\"\n    table: \"payments\"\n    \n  5_update_crm:\n    type: HubSpot\n    action: \"Update deal stage to 'Closed Won'\"\n    \n  6_notify_team:\n    type: Slack\n    message: \"💰 Payment received: ${{ amount }} from {{ customer }}\"\n    \n  7_send_receipt:\n    type: Email (SMTP)\n    to: \"{{ customer_email }}\"\n    template: \"Payment confirmation\"\n\nPhase 14: Advanced Patterns\n14.1 Fan-Out / Fan-In (Parallel Processing)\npattern: \"Split work across parallel paths, merge results\"\nuse_case: \"Enrich contacts from 3 APIs simultaneously\"\nimplementation:\n  1: \"Trigger with batch of contacts\"\n  2: \"Split into 3 parallel HTTP Request nodes\"\n  3: \"Each calls different API (Clearbit, Apollo, LinkedIn)\"\n  4: \"Merge node (Combine mode) joins results\"\n  5: \"Code node merges enrichment data per contact\"\n  \nbenefit: \"3x faster than sequential API calls\"\ncaveat: \"All 3 branches must handle their own errors\"\n\n14.2 Event-Driven Architecture\npattern: \"Workflows trigger other workflows via internal webhooks\"\nimplementation:\n  producer: |\n    [PROCESS] Order Created\n    → Process order\n    → HTTP Request to internal webhook: /event/order-created\n    \n  consumers:\n    - \"[NOTIFY] Order Confirmation → Email\"\n    - \"[SYNC] Order → Inventory Update\"\n    - \"[SYNC] Order → Accounting System\"\n    - \"[AI] Order → Fraud Detection\"\n    \nbenefit: \"Loose coupling — add new consumers without changing producer\"\ncaveat: \"Need to handle consumer failures independently\"\n\n14.3 Feature Flag Pattern\npattern: \"Control workflow behavior without editing\"\nimplementation:\n  config_source: \"Google Sheet or database table\"\n  columns: [feature_name, enabled, percentage, notes]\n  \n  in_workflow:\n    1: \"Read config at start of workflow\"\n    2: \"IF node checks feature flag\"\n    3: \"true → new behavior, false → old 
behavior\"\n    \n  examples:\n    - feature: \"use_gpt4o_mini\"\n      check: \"Route to cheaper model when enabled\"\n    - feature: \"skip_enrichment\"\n      check: \"Bypass API calls during outage\"\n    - feature: \"double_check_mode\"\n      check: \"Add human approval step\"\n\n14.4 Queue Pattern (High Volume)\npattern: \"Buffer incoming items, process at controlled rate\"\nuse_case: \"1000 webhook events/minute, API limit 10/minute\"\nimplementation:\n  ingestion_workflow:\n    1: \"Webhook receives event\"\n    2: \"Write to queue (database table: status=pending)\"\n    3: \"Return 200 immediately\"\n    \n  processing_workflow:\n    1: \"Schedule trigger (every minute)\"\n    2: \"Query: SELECT * FROM queue WHERE status='pending' LIMIT 10\"\n    3: \"Process batch\"\n    4: \"UPDATE status='completed'\"\n    5: \"On error: UPDATE status='failed', retry_count++\"\n    \nbenefit: \"Never lose events, process at sustainable rate\"\n\nPhase 15: n8n Instance Management\n15.1 Environment Strategy\nenvironments:\n  development:\n    purpose: \"Building and testing new workflows\"\n    data: \"Test/mock data only\"\n    execution_saving: \"All executions\"\n    \n  staging:\n    purpose: \"Pre-production validation\"\n    data: \"Anonymized production-like data\"\n    execution_saving: \"All executions\"\n    \n  production:\n    purpose: \"Live workflows\"\n    data: \"Real data\"\n    execution_saving: \"Errors only (save disk)\"\n    \n  promotion_process:\n    1: \"Build in dev\"\n    2: \"Export workflow JSON\"\n    3: \"Import to staging, test with realistic data\"\n    4: \"Export again (staging may have fixes)\"\n    5: \"Import to production\"\n    6: \"Activate and monitor first 24h\"\n\n15.2 n8n Performance Tuning\ntuning:\n  execution_mode: \"queue\"  # For high volume (requires Redis)\n  \n  environment_variables:\n    EXECUTIONS_DATA_SAVE_ON_ERROR: \"all\"\n    EXECUTIONS_DATA_SAVE_ON_SUCCESS: \"none\"  # Save disk in production\n    EXECUTIONS_DATA_SAVE_MANUAL_EXECUTIONS: \"true\"\n    EXECUTIONS_DATA_MAX_AGE: 720  # Hours (30 days)\n    EXECUTIONS_DATA_PRUNE: \"true\"\n    GENERIC_TIMEZONE: \"UTC\"  # Always UTC internally\n    N8N_CONCURRENCY_PRODUCTION_LIMIT: 20  # Parallel executions\n    \n  scaling:\n    vertical: \"More CPU/RAM for the n8n instance\"\n    horizontal: \"Queue mode + multiple workers\"\n    webhook_scaling: \"Separate webhook processor from main\"\n\nScoring Rubric: Workflow Quality Assessment\n\nRate any n8n workflow 0-100 across 8 dimensions:\n\nDimension\tWeight\t0 (Poor)\t5 (Adequate)\t10 (Excellent)\nReliability\t20%\tNo error handling\tBasic error trigger\tFull retry + DLQ + alerts\nSecurity\t15%\tHardcoded secrets\tCredential store\tHMAC + validation + audit\nPerformance\t15%\tSequential, no batching\tSome batching\tOptimized + cached + parallel\nMaintainability\t15%\tNo names, no docs\tNamed nodes\tFull docs + versioned + sticky notes\nData Quality\t10%\tNo validation\tBasic checks\tSchema validation + dedup + transform\nObservability\t10%\tNo monitoring\tError alerts\tMetrics + logging + health checks\nScalability\t10%\tBreaks at 100 items\tHandles 1K\tBatched + queued + horizontal\nReusability\t5%\tMonolithic\tSome sub-workflows\tModular + documented interfaces\n\nScore:\n\n0-30: Prototype — not production ready\n31-60: Functional — works but fragile\n61-80: Production — solid with room to improve\n81-100: Enterprise — resilient, observable, scalable\n10 Commandments of n8n Workflow Engineering\nEvery production workflow has an error 
handler — no exceptions\nNever hardcode secrets — credential store or env vars only\nName every node — \"HTTP Request 4\" is tech debt\nFilter early, transform late — drop bad data before heavy processing\nBatch everything — one API call for 100 items beats 100 calls for 1\nTest with real-shaped data — mock data hides real bugs\nVersion your workflows — in the name and description\nDocument the \"why\" — sticky notes explain decisions, not obvious steps\nMonitor actively — don't discover failures from angry users\nKeep it simple — if you need a diagram to explain it, decompose it\nNatural Language Commands\n\nWhen a user asks you to help with n8n, interpret these commands:\n\nCommand\tAction\n\"Build a workflow for [task]\"\tDesign complete workflow using templates above\n\"Review this workflow\"\tScore against rubric, suggest improvements\n\"Debug [workflow/error]\"\tFollow debugging checklist\n\"Optimize [workflow]\"\tApply performance optimization stack\n\"Add error handling to [workflow]\"\tImplement error trigger + retry + alert pattern\n\"Create a sub-workflow for [logic]\"\tExtract with clear interface\n\"Set up monitoring\"\tImplement health check + alert workflow\n\"Migrate workflow to production\"\tFollow deployment checklist\n\"Design integration for [A] → [B]\"\tSelect pattern from integration library\n\"Add AI to [workflow]\"\tImplement AI pipeline pattern\n\"Handle rate limits for [API]\"\tImplement batching + wait + circuit breaker\n\"Audit my n8n setup\"\tRun quick health check, score, prioritize fixes"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/1kalin/afrexai-n8n-mastery",
    "publisherUrl": "https://clawhub.ai/1kalin/afrexai-n8n-mastery",
    "owner": "1kalin",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/afrexai-n8n-mastery",
    "downloadUrl": "https://openagent3.xyz/downloads/afrexai-n8n-mastery",
    "agentUrl": "https://openagent3.xyz/skills/afrexai-n8n-mastery/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-n8n-mastery/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-n8n-mastery/agent.md"
  }
}