{
  "schemaVersion": "1.0",
  "item": {
    "slug": "csam-shield",
    "name": "Csam Shield",
    "source": "tencent",
    "type": "skill",
    "category": "效率提升",
    "sourceUrl": "https://clawhub.ai/raghulpasupathi/csam-shield",
    "canonicalUrl": "https://clawhub.ai/raghulpasupathi/csam-shield",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/csam-shield",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=csam-shield",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
        "contentDisposition": "attachment; filename=\"4claw-imageboard-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/csam-shield"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/csam-shield",
    "agentPageUrl": "https://openagent3.xyz/skills/csam-shield/agent",
    "manifestUrl": "https://openagent3.xyz/skills/csam-shield/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/csam-shield/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Metadata",
        "body": "ID: csam-shield\nVersion: 1.0.0\nCategory: safety\nPriority: CRITICAL\nInstallation: npm\nPackage: @raghulpasupathi/csam-shield"
      },
      {
        "title": "Description",
        "body": "CRITICAL SAFETY SYSTEM for detecting and preventing Child Sexual Abuse Material (CSAM). Uses advanced computer vision, hash matching, age estimation, and behavior analysis to identify illegal content. Includes automatic NCMEC reporting, law enforcement coordination, and evidence preservation."
      },
      {
        "title": "⚠️ CRITICAL WARNING",
        "body": "This skill handles the most serious form of online abuse. Implementation requires:\n\nZero tolerance policy - immediate action on detection\nLegal compliance - mandatory reporting to NCMEC/law enforcement\nEvidence preservation - secure logging for legal proceedings\nStaff protection - mental health support for reviewers\nPrivacy protection - strict handling of detected content"
      },
      {
        "title": "Features",
        "body": "Hash Database Matching: PhotoDNA, PDQ, perceptual hashing against known CSAM\nAge Estimation: AI-powered age detection with high accuracy\nAnatomical Detection: Identify inappropriate imagery of minors\nContext Analysis: Distinguish legitimate from exploitative content\nBehavioral Analysis: Detect grooming patterns and predatory behavior\nNCMEC Integration: Automatic CyberTipline reporting\nEvidence Preservation: Secure storage for law enforcement\nUser Flagging: Immediate account suspension and investigation\nNetwork Analysis: Identify distribution rings and patterns\nReal-time Blocking: Prevent upload/distribution instantly"
      },
      {
        "title": "Via ClawHub",
        "body": "https://clawhub.ai/raghulpasupathi/csam-shield"
      },
      {
        "title": "Via npm",
        "body": "npm install @raghulpasupathi/csam-shield"
      },
      {
        "title": "Configuration",
        "body": "{\n  \"enabled\": true,\n  \"settings\": {\n    \"mode\": \"maximum-protection\",\n    \"zeroTolerance\": true,\n    \"thresholds\": {\n      \"ageEstimation\": {\n        \"childThreshold\": 13,\n        \"teenThreshold\": 18,\n        \"confidence\": 0.85\n      },\n      \"hashMatch\": {\n        \"exactMatch\": 0.95,\n        \"nearMatch\": 0.85\n      },\n      \"anatomicalDetection\": {\n        \"sensitivity\": \"maximum\",\n        \"blockThreshold\": 0.70\n      },\n      \"contextAnalysis\": {\n        \"enabled\": true,\n        \"legitimateExceptions\": [\"family\", \"medical\", \"educational\"]\n      }\n    },\n    \"databases\": {\n      \"photoDNA\": {\n        \"enabled\": true,\n        \"provider\": \"microsoft\",\n        \"updateFrequency\": \"hourly\"\n      },\n      \"pdqHash\": {\n        \"enabled\": true,\n        \"provider\": \"facebook\",\n        \"updateFrequency\": \"hourly\"\n      },\n      \"ncmec\": {\n        \"enabled\": true,\n        \"hashList\": true,\n        \"updateFrequency\": \"hourly\"\n      },\n      \"custom\": {\n        \"enabled\": true,\n        \"path\": \"/secure/csam-hashes/\"\n      }\n    },\n    \"detection\": {\n      \"imageAnalysis\": true,\n      \"videoAnalysis\": true,\n      \"textAnalysis\": true,\n      \"metadataAnalysis\": true,\n      \"networkAnalysis\": true,\n      \"behaviorAnalysis\": true\n    },\n    \"reporting\": {\n      \"ncmec\": {\n        \"enabled\": true,\n        \"endpoint\": \"https://report.cybertip.org/\",\n        \"apiKey\": \"${NCMEC_API_KEY}\",\n        \"automatic\": true\n      },\n      \"lawEnforcement\": {\n        \"enabled\": true,\n        \"contacts\": [\"fbi_tips\", \"local_police\"],\n        \"automatic\": false,\n        \"requiresReview\": true\n      },\n      \"preserveEvidence\": true,\n      \"evidenceRetention\": \"indefinite\",\n      \"encryptEvidence\": true\n    },\n    \"actions\": {\n      \"onDetection\": [\n        \"block_content\",\n        \"suspend_user\",\n        \"preserve_evidence\",\n        \"report_ncmec\",\n        \"alert_security_team\",\n        \"block_ip\",\n        \"flag_related_accounts\"\n      ],\n      \"onHashMatch\": [\n        \"immediate_block\",\n        \"auto_report_ncmec\",\n        \"permanent_ban\",\n        \"preserve_all_user_content\",\n        \"notify_authorities\"\n      ]\n    },\n    \"security\": {\n      \"accessControl\": \"restricted\",\n      \"auditLogging\": \"complete\",\n      \"encryption\": \"aes-256\",\n      \"staffProtection\": true,\n      \"limitedExposure\": true\n    }\n  }\n}"
      },
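      {
        "title": "Loading the Configuration (sketch)",
        "body": "A minimal sketch, assuming the configuration above is stored as a JSON file and that placeholders like ${NCMEC_API_KEY} are resolved from the environment before use. The file path and the placeholder-substitution step are illustrative assumptions; the constructor options mirror the API example later in this document.\n\nconst fs = require('fs');\nconst CSAMShield = require('@raghulpasupathi/csam-shield');\n\n// Config path is an assumption for this sketch\nconst raw = fs.readFileSync('/secure/config/csam-shield.json', 'utf8');\n\n// Resolve ${VAR} placeholders from the environment before parsing\nconst resolved = raw.replace(/\\$\\{(\\w+)\\}/g, (_, name) => process.env[name] || '');\nconst config = JSON.parse(resolved);\n\nconst shield = new CSAMShield({\n  mode: config.settings.mode,\n  ncmecApiKey: process.env.NCMEC_API_KEY,\n  encryptionKey: process.env.EVIDENCE_ENCRYPTION_KEY\n});"
      },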
      {
        "title": "API / Methods",
        "body": "const CSAMShield = require('@raghulpasupathi/csam-shield');\n\n// Initialize with strict security\nconst shield = new CSAMShield({\n  mode: 'maximum-protection',\n  ncmecApiKey: process.env.NCMEC_API_KEY,\n  encryptionKey: process.env.EVIDENCE_ENCRYPTION_KEY\n});\n\n// ⚠️ CRITICAL: Analyze content (use with extreme caution)\nconst result = await shield.analyze('/path/to/content.jpg');\nconsole.log(result);\n/* Output:\n{\n  threat: 'CRITICAL',\n  action: 'IMMEDIATE_BLOCK',\n  detectionType: 'hash_match',\n  confidence: 0.98,\n  details: {\n    hashMatch: {\n      matched: true,\n      database: 'photoDNA',\n      matchConfidence: 0.99\n    },\n    ageEstimation: {\n      estimatedAge: 10,\n      confidence: 0.94,\n      isMinor: true\n    },\n    anatomicalDetection: {\n      inappropriate: true,\n      severity: 'extreme'\n    },\n    context: {\n      isLegitimate: false,\n      category: 'exploitative'\n    }\n  },\n  actions: {\n    contentBlocked: true,\n    userSuspended: true,\n    evidencePreserved: true,\n    ncmecReported: true,\n    reportId: 'NCMEC-2026-xxxxx',\n    authoritiesNotified: true\n  },\n  evidence: {\n    caseId: 'CASE-2026-xxxxx',\n    preservedData: [\n      'content_hash',\n      'user_info',\n      'upload_metadata',\n      'ip_address',\n      'device_info'\n    ],\n    encryptedStorage: '/secure/evidence/CASE-2026-xxxxx/'\n  },\n  timestamp: '2026-02-20T10:30:00Z'\n}\n*/\n\n// Check hash against known CSAM databases\nconst hashCheck = await shield.checkHash(contentHash);\nconsole.log(hashCheck);\n/* Output:\n{\n  isKnownCSAM: true,\n  matchedDatabases: ['photoDNA', 'pdqHash', 'ncmec'],\n  matchConfidence: 0.99,\n  action: 'IMMEDIATE_BLOCK',\n  reportRequired: true\n}\n*/\n\n// Estimate age in image\nconst ageEstimation = await shield.estimateAge('/path/to/image.jpg');\nconsole.log(ageEstimation);\n/* Output:\n{\n  estimatedAge: 12,\n  confidence: 0.91,\n  ageRange: [10, 14],\n  isMinor: true,\n  certaintyLevel: 'high'\n}\n*/\n\n// Analyze user behavior for grooming patterns\nconst behaviorAnalysis = await shield.analyzeBehavior(userId, {\n  messages: userMessages,\n  interactions: userInteractions,\n  timeline: activityTimeline\n});\nconsole.log(behaviorAnalysis);\n/* Output:\n{\n  isGrooming: true,\n  confidence: 0.87,\n  patterns: [\n    'age_inquiries',\n    'isolation_attempts',\n    'gift_offering',\n    'secrecy_requests',\n    'progressive_boundary_crossing'\n  ],\n  riskLevel: 'extreme',\n  recommendedAction: 'immediate_investigation'\n}\n*/\n\n// Report to NCMEC CyberTipline\nconst ncmecReport = await shield.reportToNCMEC({\n  content: contentDetails,\n  user: userDetails,\n  evidence: preservedEvidence\n});\nconsole.log(ncmecReport);\n/* Output:\n{\n  success: true,\n  reportId: 'NCMEC-2026-xxxxx',\n  timestamp: '2026-02-20T10:30:00Z',\n  status: 'submitted',\n  followUp: 'pending_review'\n}\n*/\n\n// Preserve evidence for legal proceedings\nconst evidence = await shield.preserveEvidence({\n  contentId: 'content-123',\n  userId: 'user-456',\n  includeMetadata: true,\n  includeRelatedContent: true,\n  includeUserHistory: true\n});\n\n// Suspend user and related accounts\nawait shield.suspendUser(userId, {\n  reason: 'CSAM_DETECTION',\n  permanent: true,\n  blockRelatedAccounts: true,\n  preserveEvidence: true\n});\n\n// Network analysis to find related accounts\nconst network = await shield.analyzeNetwork(userId);\nconsole.log(network);\n/* Output:\n{\n  suspiciousAccounts: [\n    { userId: 'user-789', riskScore: 0.92, connection: 
'frequent_messages' },\n    { userId: 'user-012', riskScore: 0.85, connection: 'content_sharing' }\n  ],\n  distributionRing: {\n    detected: true,\n    size: 7,\n    accounts: [...]\n  },\n  recommendedActions: [\n    'investigate_all_accounts',\n    'preserve_all_evidence',\n    'notify_authorities'\n  ]\n}\n*/\n\n// Secure hash generation (for reporting only)\nconst secureHash = await shield.generateSecureHash('/path/to/content.jpg');\n\n// Update hash databases\nawait shield.updateHashDatabases();\n\n// Event listeners (CRITICAL - requires immediate response)\nshield.on('csam_detected', async (detection) => {\n  console.error('🚨 CRITICAL: CSAM DETECTED');\n\n  // Immediate actions\n  await shield.blockContent(detection.contentId);\n  await shield.suspendUser(detection.userId);\n  await shield.preserveEvidence(detection);\n  await shield.reportToNCMEC(detection);\n  await shield.notifySecurityTeam(detection);\n  await shield.alertAuthorities(detection);\n});\n\nshield.on('hash_match', async (match) => {\n  console.error('🚨 CRITICAL: Known CSAM hash matched');\n\n  // Automatic immediate actions\n  await shield.executeEmergencyProtocol(match);\n});\n\nshield.on('grooming_detected', async (behavior) => {\n  console.warn('⚠️ WARNING: Potential grooming behavior detected');\n\n  // Investigation and monitoring\n  await shield.flagForInvestigation(behavior.userId);\n  await shield.enhanceMonitoring(behavior.userId);\n});\n\n// Secure audit logging\nconst auditLog = await shield.getAuditLog({\n  type: 'csam_detection',\n  timeRange: 'last_30_days',\n  includeReports: true\n});\n\n// Staff protection - limited exposure mode\nshield.enableStaffProtection({\n  blurContent: true,\n  limitedDetails: true,\n  rotationSchedule: true,\n  mentalHealthSupport: true\n});\n\n// Compliance reporting\nconst complianceReport = await shield.generateComplianceReport({\n  period: 'monthly',\n  includeStatistics: true,\n  includeActions: true,\n  format: 'legal'\n});"
      },
      {
        "title": "Dependencies",
        "body": "@microsoft/photodna: ^2.0.0 - PhotoDNA hashing\npdq-hash: ^1.0.0 - Facebook PDQ hashing\n@tensorflow/tfjs-node-gpu: ^4.0.0 - Age estimation models\nopencv4nodejs: ^6.0.0 - Image analysis\nncmec-reporter: ^1.0.0 - NCMEC CyberTipline integration\ncrypto: Built-in - Evidence encryption"
      },
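      {
        "title": "package.json Fragment (sketch)",
        "body": "The dependency list above, restated as a package.json fragment for audit purposes. Version ranges are taken verbatim from the list; the shield's own range assumes the 1.0.0 version in the metadata, and crypto is a Node built-in so it is deliberately omitted.\n\n{\n  \"dependencies\": {\n    \"@raghulpasupathi/csam-shield\": \"^1.0.0\",\n    \"@microsoft/photodna\": \"^2.0.0\",\n    \"pdq-hash\": \"^1.0.0\",\n    \"@tensorflow/tfjs-node-gpu\": \"^4.0.0\",\n    \"opencv4nodejs\": \"^6.0.0\",\n    \"ncmec-reporter\": \"^1.0.0\"\n  }\n}"
      },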
      {
        "title": "Performance",
        "body": "Hash Matching: <10ms (database lookup)\nAge Estimation: 100-200ms per image\nFull Analysis: 200-500ms per image\nVideo Analysis: Real-time frame scanning\nAccuracy:\n\nHash matching: 99.9% (known CSAM)\nAge estimation: 92% accuracy (±2 years)\nContext analysis: 89% accuracy\nFalse positive rate: <0.01% (strict to prevent abuse)"
      },
      {
        "title": "Legal Requirements",
        "body": "Mandatory Reporting: Report all detected CSAM to NCMEC (18 USC § 2258A)\nEvidence Preservation: Retain evidence for law enforcement (90+ days minimum)\nNo Distribution: Never distribute detected CSAM, even internally\nUser Notification: Do NOT notify user of detection (obstruction warning)\nLaw Enforcement Cooperation: Full cooperation with investigations\nInternational Compliance: Comply with local laws (IWF, INHOPE, etc.)"
      },
      {
        "title": "Use Cases",
        "body": "Social media platforms\nMessaging applications\nFile sharing services\nCloud storage providers\nDating applications\nGaming platforms with UGC\nForum and community sites\nAny platform allowing user uploads"
      },
      {
        "title": "False Positives",
        "body": "Problem: Legitimate content flagged as CSAM\nSolution:\n\nReview context analysis results\nCheck for family/medical/educational context\nManual review by trained staff ONLY\nDocument false positive for model improvement\nNEVER automatically ignore - always review\nConsider legitimate use cases in detection logic"
      },
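      {
        "title": "False Positive Triage (sketch)",
        "body": "A sketch of the review flow above, assuming the analyze() result shape shown in the API section: nothing flagged is ever auto-ignored, and anything short of an automatic block routes to trained staff. The reviewQueue helper and the non-CRITICAL threat levels are hypothetical.\n\nconst CSAMShield = require('@raghulpasupathi/csam-shield');\nconst shield = new CSAMShield({ mode: 'maximum-protection' });\n\nasync function triage(contentPath, reviewQueue) {\n  const result = await shield.analyze(contentPath);\n\n  if (result.threat === 'CRITICAL') {\n    return 'blocked'; // handled by the emergency protocol, not by reviewers\n  }\n\n  // Any other flag: document it and route to a trained reviewer,\n  // never ignore automatically (threat levels below CRITICAL are assumed)\n  if (result.threat && result.threat !== 'NONE') {\n    await reviewQueue.push({\n      contentPath,\n      category: result.details && result.details.context ? result.details.context.category : 'unknown',\n      confidence: result.confidence,\n      reason: 'possible_false_positive'\n    });\n    return 'pending_review';\n  }\n\n  return 'clear';\n}"
      },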
      {
        "title": "Missing Known CSAM",
        "body": "Problem: Hash databases not catching known content\nSolution:\n\nVerify database updates are running hourly\nCheck all hash databases enabled\nEnsure proper API keys configured\nTest hash generation process\nVerify network connectivity to update servers\nContact database providers for troubleshooting"
      },
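      {
        "title": "Hourly Update Check (sketch)",
        "body": "A sketch of the first two fixes above: drive the hourly hash-database refresh explicitly and alert when it fails, so a silently stale database cannot go unnoticed. updateHashDatabases() is documented in the API section; alertOps is a hypothetical stand-in for a real paging integration.\n\nconst CSAMShield = require('@raghulpasupathi/csam-shield');\nconst shield = new CSAMShield({ mode: 'maximum-protection' });\n\n// Hypothetical stand-in for your paging/alerting integration\nasync function alertOps(event, detail) {\n  console.error('[ALERT]', event, detail);\n}\n\nasync function refreshHashDatabases() {\n  try {\n    await shield.updateHashDatabases();\n    console.log('Hash databases refreshed at', new Date().toISOString());\n  } catch (error) {\n    // A failed update means known CSAM may slip through; escalate immediately\n    await alertOps('hash_db_update_failed', error.message);\n  }\n}\n\n// Match the hourly updateFrequency from the Configuration section\nrefreshHashDatabases();\nsetInterval(refreshHashDatabases, 60 * 60 * 1000);"
      },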
      {
        "title": "NCMEC Reporting Failures",
        "body": "Problem: Reports not submitting to NCMEC\nSolution:\n\nVerify API credentials\nCheck network connectivity\nQueue reports for retry\nManual submission if automatic fails\nContact NCMEC technical support\nKeep local evidence regardless of submission status"
      },
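      {
        "title": "Report Retry Queue (sketch)",
        "body": "A sketch of the queue-and-retry advice above, using the reportToNCMEC() call documented in the API section. The in-memory queue and 15-minute interval are illustrative assumptions; a real deployment would persist the queue, and local evidence is kept regardless of submission status.\n\nconst CSAMShield = require('@raghulpasupathi/csam-shield');\nconst shield = new CSAMShield({ ncmecApiKey: process.env.NCMEC_API_KEY });\n\nconst pendingReports = [];\n\nasync function submitWithRetry(report) {\n  try {\n    const result = await shield.reportToNCMEC(report);\n    console.log('NCMEC report submitted:', result.reportId);\n  } catch (error) {\n    // Evidence stays preserved locally either way; queue the report for retry\n    console.error('NCMEC submission failed, queueing for retry:', error.message);\n    pendingReports.push(report);\n  }\n}\n\n// Retry queued reports periodically (interval is an assumption)\nsetInterval(async () => {\n  const batch = pendingReports.splice(0);\n  for (const report of batch) {\n    await submitWithRetry(report);\n  }\n}, 15 * 60 * 1000);"
      },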
      {
        "title": "Age Estimation Inaccuracy",
        "body": "Problem: Age estimation giving unreliable results\nSolution:\n\nUse as one signal, not sole determinant\nCombine with other detection methods\nLower confidence threshold for safety\nUpdate age estimation models regularly\nConsider edge cases (appearing older/younger)\nWhen in doubt, err on side of caution"
      },
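      {
        "title": "Combining Age Signals (sketch)",
        "body": "A sketch of treating age estimation as one signal among several, per the guidance above, using the checkHash() and estimateAge() calls documented in the API section. The 0.85 confidence threshold and the returned action labels are assumptions for illustration.\n\nconst CSAMShield = require('@raghulpasupathi/csam-shield');\nconst shield = new CSAMShield({ mode: 'maximum-protection' });\n\nasync function assess(imagePath, contentHash) {\n  // A known-hash match overrides every other signal\n  const hashCheck = await shield.checkHash(contentHash);\n  if (hashCheck.isKnownCSAM) {\n    return 'IMMEDIATE_BLOCK';\n  }\n\n  const age = await shield.estimateAge(imagePath);\n\n  // Never act on age estimation alone: a confident minor estimate escalates\n  // to full analysis, and any doubt errs toward trained human review\n  if (age.isMinor && age.confidence >= 0.85) {\n    return 'FULL_ANALYSIS';\n  }\n  if (age.isMinor || age.confidence < 0.85) {\n    return 'HUMAN_REVIEW';\n  }\n  return 'CONTINUE_CHECKS';\n}"
      },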
      {
        "title": "Evidence Storage Issues",
        "body": "Problem: Evidence not being preserved correctly\nSolution:\n\nVerify encryption keys configured\nCheck storage permissions and space\nTest evidence retrieval process\nImplement redundant storage\nRegular backup verification\nConsult legal team on retention requirements"
      },
      {
        "title": "Integration Example",
        "body": "// ⚠️ CRITICAL SYSTEM INTEGRATION\nconst express = require('express');\nconst multer = require('multer');\nconst CSAMShield = require('@raghulpasupathi/csam-shield');\n\nconst app = express();\nconst upload = multer({ dest: '/secure/temp/' });\nconst shield = new CSAMShield({\n  mode: 'maximum-protection',\n  ncmecApiKey: process.env.NCMEC_API_KEY\n});\n\n// Critical: Pre-upload hash check\napp.post('/api/upload', upload.single('file'), async (req, res) => {\n  const tempPath = req.file.path;\n\n  try {\n    // Generate hash immediately\n    const contentHash = await shield.generateSecureHash(tempPath);\n\n    // Check against known CSAM databases FIRST\n    const hashCheck = await shield.checkHash(contentHash);\n\n    if (hashCheck.isKnownCSAM) {\n      // CRITICAL: Known CSAM detected\n      console.error('🚨 CRITICAL: Known CSAM hash matched');\n\n      // Preserve evidence\n      await shield.preserveEvidence({\n        contentHash,\n        userId: req.user.id,\n        ip: req.ip,\n        uploadAttempt: true,\n        timestamp: new Date()\n      });\n\n      // Automatic NCMEC report\n      await shield.reportToNCMEC({\n        type: 'known_csam_upload',\n        hash: contentHash,\n        user: req.user,\n        ip: req.ip\n      });\n\n      // Suspend user immediately\n      await shield.suspendUser(req.user.id, {\n        reason: 'CSAM_UPLOAD',\n        permanent: true\n      });\n\n      // Delete file securely\n      await shield.secureDelete(tempPath);\n\n      // DO NOT reveal reason to user\n      return res.status(400).json({\n        success: false,\n        error: 'Upload failed. Please contact support.'\n      });\n    }\n\n    // Perform full analysis\n    const analysis = await shield.analyze(tempPath);\n\n    if (analysis.threat === 'CRITICAL') {\n      // New CSAM detected\n      console.error('🚨 CRITICAL: Potential CSAM detected');\n\n      // Execute emergency protocol\n      await shield.executeEmergencyProtocol({\n        content: tempPath,\n        user: req.user,\n        analysis: analysis\n      });\n\n      // DO NOT reveal reason to user\n      return res.status(400).json({\n        success: false,\n        error: 'Upload failed. Please contact support.'\n      });\n    }\n\n    // Content passed all checks\n    const url = await uploadToStorage(tempPath);\n\n    res.json({\n      success: true,\n      url: url\n    });\n  } catch (error) {\n    console.error('CSAM Shield error:', error);\n\n    // Fail closed - reject upload\n    res.status(500).json({\n      success: false,\n      error: 'Upload failed. 
Please try again.'\n    });\n  } finally {\n    // Always clean up temp file\n    if (fs.existsSync(tempPath)) {\n      await shield.secureDelete(tempPath);\n    }\n  }\n});\n\n// Background monitoring of existing content\nasync function scanExistingContent() {\n  console.log('Starting periodic content scan...');\n\n  const contentBatch = await getContentForScanning(1000);\n\n  for (const content of contentBatch) {\n    try {\n      const hash = await shield.generateSecureHash(content.url);\n      const check = await shield.checkHash(hash);\n\n      if (check.isKnownCSAM) {\n        console.error(`🚨 CRITICAL: Known CSAM found in existing content: ${content.id}`);\n\n        // Execute emergency protocol\n        await shield.executeEmergencyProtocol({\n          contentId: content.id,\n          userId: content.userId,\n          discoveryMethod: 'periodic_scan'\n        });\n      }\n    } catch (error) {\n      console.error(`Error scanning content ${content.id}:`, error);\n    }\n  }\n}\n\n// Run hourly scans\nsetInterval(scanExistingContent, 60 * 60 * 1000);\n\n// Admin dashboard (RESTRICTED ACCESS)\napp.get('/admin/csam/dashboard', requireSecurityClearance, async (req, res) => {\n  const stats = await shield.getStats({\n    period: '30d',\n    includeReports: true\n  });\n\n  res.json({\n    success: true,\n    stats: stats,\n    warning: 'RESTRICTED: Security clearance required'\n  });\n});\n\n// Compliance reporting (LEGAL TEAM ONLY)\napp.get('/legal/csam/compliance-report', requireLegalAccess, async (req, res) => {\n  const report = await shield.generateComplianceReport({\n    period: req.query.period || 'monthly',\n    format: 'legal'\n  });\n\n  res.json({\n    success: true,\n    report: report\n  });\n});"
      },
      {
        "title": "Best Practices (CRITICAL)",
        "body": "Zero Tolerance: No exceptions, immediate action on detection\nReport Everything: When in doubt, report to NCMEC\nPreserve Evidence: Secure storage for law enforcement\nStaff Protection: Mental health support, limited exposure\nNever Distribute: Don't share detected content internally\nLegal Compliance: Follow all mandatory reporting laws\nUser Privacy: Balance detection with legitimate user privacy\nRegular Updates: Keep hash databases current (hourly)\nAudit Everything: Complete logging for legal proceedings\nEncryption: Encrypt all evidence and sensitive data\nAccess Control: Strict role-based access to systems\nCooperation: Full cooperation with law enforcement"
      },
      {
        "title": "Emergency Contacts",
        "body": "NCMEC CyberTipline: 1-800-843-5678 / report.cybertip.org\nFBI IC3: ic3.gov\nInterpol: interpol.int/Crimes/Crimes-against-children\nIWF (UK): iwf.org.uk\nINHOPE: inhope.org"
      },
      {
        "title": "Mental Health Resources (for staff)",
        "body": "Working with CSAM detection is traumatic. Provide:\n\nRegular counseling services\nRotation schedules\nDebriefing sessions\nTime off after exposure\nPeer support groups\n24/7 crisis support"
      }
    ]
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/raghulpasupathi/csam-shield",
    "publisherUrl": "https://clawhub.ai/raghulpasupathi/csam-shield",
    "owner": "raghulpasupathi",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/csam-shield",
    "downloadUrl": "https://openagent3.xyz/downloads/csam-shield",
    "agentUrl": "https://openagent3.xyz/skills/csam-shield/agent",
    "manifestUrl": "https://openagent3.xyz/skills/csam-shield/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/csam-shield/agent.md"
  }
}