{
  "schemaVersion": "1.0",
  "item": {
    "slug": "crisis-detector",
    "name": "Crisis Detector",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/raghulpasupathi/crisis-detector",
    "canonicalUrl": "https://clawhub.ai/raghulpasupathi/crisis-detector",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/crisis-detector",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=crisis-detector",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/crisis-detector"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/crisis-detector",
    "agentPageUrl": "https://openagent3.xyz/skills/crisis-detector/agent",
    "manifestUrl": "https://openagent3.xyz/skills/crisis-detector/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/crisis-detector/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Metadata",
        "body": "ID: crisis-detector\nVersion: 1.0.0\nCategory: safety\nPriority: Critical\nInstallation: npm\nPackage: @raghulpasupathi/crisis-detector"
      },
      {
        "title": "Description",
        "body": "Life-saving crisis detection system for identifying self-harm, suicide ideation, and mental health emergencies. Uses NLP, sentiment analysis, and behavioral patterns to detect users in crisis and connect them with immediate help resources."
      },
      {
        "title": "Features",
        "body": "Suicide Ideation Detection: Identify expressions of suicidal thoughts\nSelf-harm Detection: Detect mentions of self-injury or harm\nCrisis Severity Scoring: Assess urgency level (low, medium, high, critical)\nIntent Classification: Distinguish between ideation, planning, and imminent risk\nHistorical Analysis: Track user patterns over time\nResource Connection: Automatic crisis resource provision\nEmergency Contact: Alert designated contacts or authorities\nReal-time Monitoring: Continuous monitoring of at-risk users\nMulti-language Support: 20+ languages for global coverage\nEmpathetic Response: Compassionate automated messaging"
      },
      {
        "title": "Via ClawHub",
        "body": "https://clawhub.ai/raghulpasupathi/crisis-detector"
      },
      {
        "title": "Via npm",
        "body": "npm install @raghulpasupathi/crisis-detector"
      },
      {
        "title": "Configuration",
        "body": "{\n  \"enabled\": true,\n  \"settings\": {\n    \"detectionMode\": \"sensitive\",\n    \"modes\": {\n      \"sensitive\": {\n        \"ideation\": 0.40,\n        \"planning\": 0.30,\n        \"imminent\": 0.20,\n        \"selfHarm\": 0.35,\n        \"actionThreshold\": 0.40\n      },\n      \"moderate\": {\n        \"ideation\": 0.60,\n        \"planning\": 0.50,\n        \"imminent\": 0.40,\n        \"selfHarm\": 0.55,\n        \"actionThreshold\": 0.60\n      },\n      \"conservative\": {\n        \"ideation\": 0.75,\n        \"planning\": 0.65,\n        \"imminent\": 0.55,\n        \"selfHarm\": 0.70,\n        \"actionThreshold\": 0.75\n      }\n    },\n    \"detection\": {\n      \"textAnalysis\": {\n        \"enabled\": true,\n        \"contextAware\": true,\n        \"historicalContext\": true\n      },\n      \"behaviorAnalysis\": {\n        \"enabled\": true,\n        \"trackPatterns\": true,\n        \"abnormalityDetection\": true\n      },\n      \"sentimentAnalysis\": {\n        \"enabled\": true,\n        \"depressionIndicators\": true,\n        \"hopelessnessDetection\": true\n      }\n    },\n    \"resources\": {\n      \"crisisHotlines\": {\n        \"enabled\": true,\n        \"international\": true,\n        \"textServices\": true,\n        \"chatServices\": true\n      },\n      \"mentalHealthResources\": {\n        \"enabled\": true,\n        \"therapistDirectory\": true,\n        \"selfHelpResources\": true,\n        \"supportGroups\": true\n      },\n      \"emergencyServices\": {\n        \"enabled\": true,\n        \"localEmergency\": true,\n        \"wellnessCheck\": false\n      }\n    },\n    \"response\": {\n      \"automaticMessage\": true,\n      \"messageTemplate\": \"caring\",\n      \"resourceDisplay\": \"immediate\",\n      \"followUp\": true,\n      \"humanOutreach\": true\n    },\n    \"actions\": {\n      \"onIdeation\": [\n        \"show_resources\",\n        \"send_caring_message\",\n        \"notify_safety_team\",\n        \"enable_monitoring\"\n      ],\n      \"onPlanning\": [\n        \"show_resources_urgent\",\n        \"immediate_outreach\",\n        \"notify_emergency_contacts\",\n        \"enable_intensive_monitoring\"\n      ],\n      \"onImminent\": [\n        \"emergency_intervention\",\n        \"contact_authorities\",\n        \"notify_emergency_contacts\",\n        \"continuous_monitoring\"\n      ]\n    },\n    \"privacy\": {\n      \"respectUserPrivacy\": true,\n      \"informedConsent\": true,\n      \"dataMinimization\": true,\n      \"confidentialLogging\": true\n    },\n    \"languages\": [\"en\", \"es\", \"fr\", \"de\", \"pt\", \"it\", \"ja\", \"ko\", \"zh\", \"ar\", \"hi\", \"ru\"]\n  }\n}"
      },
      {
        "title": "API / Methods",
        "body": "const CrisisDetector = require('@raghulpasupathi/crisis-detector');\n\n// Initialize detector\nconst detector = new CrisisDetector({\n  detectionMode: 'sensitive',\n  enableResources: true\n});\n\n// Analyze text for crisis signals\nconst result = await detector.analyze('I don\\'t want to be here anymore...');\nconsole.log(result);\n/* Output:\n{\n  isCrisis: true,\n  severity: 'high',\n  urgency: 'immediate',\n  confidence: 0.87,\n  categories: {\n    suicideIdeation: 0.89,\n    selfHarm: 0.12,\n    depression: 0.78,\n    hopelessness: 0.82\n  },\n  intent: {\n    type: 'ideation',\n    planning: false,\n    imminent: false,\n    meansIdentified: false\n  },\n  riskLevel: 'high',\n  indicators: [\n    { type: 'suicide_ideation', phrase: \"don't want to be here\", confidence: 0.91 },\n    { type: 'hopelessness', phrase: \"anymore\", confidence: 0.75 }\n  ],\n  sentiment: {\n    overall: -0.85,\n    depression: 0.82,\n    anxiety: 0.45,\n    hopelessness: 0.88\n  },\n  recommendedAction: 'immediate_intervention',\n  resources: {\n    crisisHotlines: [\n      {\n        name: 'National Suicide Prevention Lifeline',\n        phone: '988',\n        text: 'Text HOME to 741741',\n        chat: 'https://suicidepreventionlifeline.org/chat/',\n        available: '24/7'\n      },\n      {\n        name: 'Crisis Text Line',\n        text: 'Text HELLO to 741741',\n        available: '24/7'\n      }\n    ],\n    emergencyServices: {\n      call: '911',\n      text: 'Text 911 (where available)'\n    }\n  },\n  suggestedMessage: \"I'm concerned about you. You're not alone, and there are people who can help. Please reach out to the National Suicide Prevention Lifeline at 988 - they're available 24/7 and want to support you.\",\n  timestamp: '2026-02-20T10:30:00Z'\n}\n*/\n\n// Quick crisis check\nconst isCrisis = await detector.isCrisis('Some text to check');\n\n// Assess crisis severity\nconst severity = await detector.assessSeverity('Text expressing distress');\nconsole.log(severity);\n/* Output:\n{\n  level: 'high',\n  score: 0.84,\n  urgency: 'immediate',\n  riskFactors: [\n    'suicide_ideation',\n    'hopelessness',\n    'social_isolation'\n  ],\n  protectiveFactors: [\n    'help_seeking'\n  ]\n}\n*/\n\n// Detect specific crisis types\nconst suicideRisk = await detector.detectSuicideRisk('Text to analyze');\nconst selfHarmRisk = await detector.detectSelfHarmRisk('Text to analyze');\n\n// Analyze user behavior patterns\nconst behaviorAnalysis = await detector.analyzeUserBehavior(userId, {\n  recentMessages: messages,\n  activityChanges: activityData,\n  timeRange: '30d'\n});\nconsole.log(behaviorAnalysis);\n/* Output:\n{\n  concernLevel: 'elevated',\n  patterns: [\n    {\n      type: 'social_withdrawal',\n      detected: true,\n      confidence: 0.78,\n      description: 'Decreased interaction frequency by 65%'\n    },\n    {\n      type: 'negative_content_increase',\n      detected: true,\n      confidence: 0.82,\n      description: 'Increased negative sentiment in 78% of recent posts'\n    },\n    {\n      type: 'activity_time_change',\n      detected: true,\n      confidence: 0.71,\n      description: 'Shift to late-night activity (12am-4am)'\n    }\n  ],\n  riskLevel: 'moderate-high',\n  recommendation: 'enhanced_monitoring'\n}\n*/\n\n// Get crisis resources for location\nconst resources = await detector.getResources({\n  country: 'US',\n  state: 'CA',\n  language: 'en',\n  services: ['crisis_hotline', 'text_line', 'chat_support']\n});\n\n// Generate empathetic response\nconst response = await detector.generateResponse({\n  severity: 'high',\n  intent: 'ideation',\n  includeResources: true,\n  tone: 'caring'\n});\n\n// Track at-risk user\nawait detector.trackUser(userId, {\n  riskLevel: 'high',\n  monitoringIntensity: 'enhanced',\n  alertContacts: true\n});\n\n// Alert safety team\nawait detector.alertSafetyTeam({\n  userId: userId,\n  severity: 'critical',\n  analysis: analysisResult,\n  requiresImmediate: true\n});\n\n// Send caring outreach\nawait detector.sendOutreach(userId, {\n  type: 'caring_message',\n  includeResources: true,\n  fromHuman: true\n});\n\n// Check user status\nconst status = await detector.getUserStatus(userId);\nconsole.log(status);\n/* Output:\n{\n  userId: 'user-123',\n  currentRiskLevel: 'moderate',\n  monitoringStatus: 'enhanced',\n  lastCrisisDetection: '2026-02-18T14:30:00Z',\n  outreachAttempts: 2,\n  resourcesProvided: true,\n  emergencyContactNotified: false,\n  trend: 'stable'\n}\n*/\n\n// Event listeners\ndetector.on('crisisDetected', async (crisis) => {\n  console.warn('⚠️ CRISIS DETECTED:', crisis);\n\n  // Immediate response\n  await detector.sendOutreach(crisis.userId, {\n    severity: crisis.severity,\n    resources: crisis.resources\n  });\n\n  // Notify safety team\n  await detector.alertSafetyTeam(crisis);\n});\n\ndetector.on('imminentRisk', async (risk) => {\n  console.error('🚨 IMMINENT RISK DETECTED');\n\n  // Emergency intervention\n  await detector.executeEmergencyProtocol(risk);\n\n  // Consider wellness check\n  if (risk.severity === 'critical') {\n    await detector.considerWellnessCheck(risk);\n  }\n});\n\ndetector.on('improvementDetected', (update) => {\n  console.log('✓ User showing improvement:', update);\n\n  // Continue support\n  await detector.sendEncouragement(update.userId);\n});\n\n// Performance stats\nconst stats = detector.getStats();\nconsole.log(stats);\n/* Output:\n{\n  totalAnalyses: 100000,\n  crisisDetected: 1250,\n  bySeverity: {\n    low: 400,\n    moderate: 550,\n    high: 250,\n    critical: 50\n  },\n  interventions: 1250,\n  resourcesProvided: 1250,\n  emergencyContacts: 75,\n  positiveOutcomes: 980,\n  averageResponseTime: '45s'\n}\n*/"
      },
      {
        "title": "Dependencies",
        "body": "transformers.js: ^2.6.0 - Sentiment and intent analysis\nnatural: ^6.0.0 - NLP processing\ncompromise: ^14.0.0 - Text understanding\nsentiment: ^5.0.0 - Sentiment analysis\nfranc-min: ^6.0.0 - Language detection"
      },
      {
        "title": "Performance",
        "body": "Analysis Speed: 30-80ms per text\nAccuracy:\n\nSuicide ideation: 91% detection rate\nSelf-harm: 88% detection rate\nCrisis severity: 87% accuracy\nFalse positive rate: 8-12% (intentionally higher for safety)\n\n\nResponse Time: <1 minute for outreach"
      },
      {
        "title": "Crisis Hotlines (Global)",
        "body": "USA: 988 Suicide & Crisis Lifeline\nUK: 116 123 (Samaritans)\nCanada: 1-833-456-4566\nAustralia: 13 11 14 (Lifeline)\nInternational: befrienders.org"
      },
      {
        "title": "Use Cases",
        "body": "Social media platforms\nMental health apps\nGaming communities\nDating applications\nStudent portals\nEmployee wellness platforms\nSupport forums\nChat applications\nAny platform with user communication"
      },
      {
        "title": "High False Positive Rate",
        "body": "Problem: Too many non-crisis messages flagged\nSolution:\n\nSwitch to 'moderate' or 'conservative' mode\nEnable more context analysis\nCheck for sarcasm/humor detection\nReview historical user behavior\nAdjust thresholds upward\nBetter to err on side of caution - false positives are acceptable"
      },
      {
        "title": "Missing Crisis Signals",
        "body": "Problem: Not detecting users in genuine crisis\nSolution:\n\nSwitch to 'sensitive' mode\nLower detection thresholds\nEnable behavioral analysis\nCheck language support\nReview flagged phrases database\nReport false negatives for model improvement\nThis is more serious than false positives"
      },
      {
        "title": "User Privacy Concerns",
        "body": "Problem: Users concerned about monitoring\nSolution:\n\nClear privacy policy disclosure\nExplain life-saving purpose\nOffer opt-in for enhanced monitoring\nData minimization practices\nConfidential handling of sensitive data\nBalance privacy with safety"
      },
      {
        "title": "Resource Availability",
        "body": "Problem: Crisis resources not available in user's location\nSolution:\n\nExpand international hotline database\nInclude online chat/text services\nProvide self-help resources\nConnect to community support\nPartner with local organizations\nAlways provide emergency services number"
      },
      {
        "title": "Response Effectiveness",
        "body": "Problem: Unsure if interventions are helping\nSolution:\n\nTrack user engagement with resources\nMonitor user behavior after intervention\nFollow-up outreach after 24-48 hours\nCollect feedback when appropriate\nPartner with mental health professionals\nContinuous improvement of messaging"
      },
      {
        "title": "Integration Example",
        "body": "// Complete platform integration\nconst express = require('express');\nconst CrisisDetector = require('@raghulpasupathi/crisis-detector');\n\nconst app = express();\nconst detector = new CrisisDetector({ detectionMode: 'sensitive' });\n\n// At-risk user tracking\nconst atRiskUsers = new Map();\n\n// Monitor all user-generated content\napp.post('/api/posts/create', async (req, res) => {\n  try {\n    const { userId, content } = req.body;\n\n    // Analyze for crisis signals\n    const analysis = await detector.analyze(content);\n\n    if (analysis.isCrisis) {\n      console.warn(`⚠️ Crisis detected for user ${userId}`);\n\n      // Log for safety team (confidential)\n      await logCrisisEvent(userId, analysis);\n\n      // Immediate response based on severity\n      if (analysis.severity === 'critical' || analysis.intent.imminent) {\n        // CRITICAL: Imminent risk\n        console.error(`🚨 IMMINENT RISK: User ${userId}`);\n\n        // Show resources immediately\n        await showEmergencyResources(userId, analysis.resources);\n\n        // Alert safety team for immediate outreach\n        await alertSafetyTeam({\n          userId: userId,\n          severity: 'critical',\n          analysis: analysis,\n          urgent: true\n        });\n\n        // Consider emergency services\n        if (analysis.riskLevel === 'critical') {\n          await considerEmergencyServices(userId, analysis);\n        }\n\n        // Track intensively\n        await detector.trackUser(userId, {\n          riskLevel: 'critical',\n          monitoringIntensity: 'maximum'\n        });\n      } else if (analysis.severity === 'high') {\n        // High risk: Immediate support\n        await sendCaringMessage(userId, {\n          message: analysis.suggestedMessage,\n          resources: analysis.resources\n        });\n\n        await alertSafetyTeam({\n          userId: userId,\n          severity: 'high',\n          analysis: analysis,\n          urgent: false\n        });\n\n        await detector.trackUser(userId, {\n          riskLevel: 'high',\n          monitoringIntensity: 'enhanced'\n        });\n      } else {\n        // Moderate risk: Show resources\n        await showSupportResources(userId, analysis.resources);\n\n        await detector.trackUser(userId, {\n          riskLevel: 'moderate',\n          monitoringIntensity: 'standard'\n        });\n      }\n\n      // Update tracking\n      atRiskUsers.set(userId, {\n        lastDetection: new Date(),\n        severity: analysis.severity,\n        analysis: analysis\n      });\n    }\n\n    // Allow post (don't censor crisis content)\n    const post = await createPost(userId, content);\n\n    res.json({\n      success: true,\n      postId: post.id\n    });\n  } catch (error) {\n    res.status(500).json({ error: error.message });\n  }\n});\n\n// Background monitoring of at-risk users\nsetInterval(async () => {\n  for (const [userId, tracking] of atRiskUsers) {\n    // Check recent activity\n    const behavior = await detector.analyzeUserBehavior(userId, {\n      timeRange: '24h'\n    });\n\n    if (behavior.concernLevel === 'elevated') {\n      // Send follow-up\n      await sendFollowUp(userId, {\n        type: 'check_in',\n        resources: true\n      });\n    } else if (behavior.concernLevel === 'improved') {\n      // Positive trend\n      await sendEncouragement(userId);\n\n      // Reduce monitoring intensity\n      await detector.trackUser(userId, {\n        riskLevel: 'low',\n        monitoringIntensity: 'minimal'\n      });\n    }\n  }\n}, 6 * 60 * 60 * 1000); // Every 6 hours\n\n// User dashboard - show resources\napp.get('/api/mental-health/resources', async (req, res) => {\n  const userId = req.user.id;\n\n  const resources = await detector.getResources({\n    country: req.user.country,\n    language: req.user.language\n  });\n\n  res.json({\n    success: true,\n    resources: resources,\n    message: 'You\\'re not alone. Help is available 24/7.'\n  });\n});\n\n// Safety team dashboard\napp.get('/admin/safety/crisis-monitor', requireSafetyTeam, async (req, res) => {\n  const activeAlerts = await getActiveCrisisAlerts();\n\n  // Add current status for each\n  const enriched = await Promise.all(\n    activeAlerts.map(async alert => {\n      const status = await detector.getUserStatus(alert.userId);\n      return { ...alert, status };\n    })\n  );\n\n  res.json({\n    success: true,\n    alerts: enriched,\n    count: enriched.length\n  });\n});\n\n// Helper functions\nasync function sendCaringMessage(userId, { message, resources }) {\n  await sendNotification(userId, {\n    title: 'We\\'re here for you',\n    body: message,\n    action: {\n      text: 'Get help now',\n      url: '/mental-health/resources'\n    },\n    priority: 'high'\n  });\n\n  // Log outreach attempt\n  await logOutreach(userId, 'caring_message');\n}\n\nasync function considerEmergencyServices(userId, analysis) {\n  // This is a sensitive decision requiring human judgment\n  await alertSafetyTeam({\n    userId: userId,\n    message: 'CRITICAL: Consider wellness check',\n    analysis: analysis,\n    requiresDecision: true\n  });\n}"
      },
      {
        "title": "Best Practices (CRITICAL)",
        "body": "Err on Side of Caution: False positives acceptable, false negatives are not\nImmediate Response: Respond within minutes, not hours\nEmpathetic Messaging: Use caring, non-judgmental language\nResource Accessibility: Make help easy to access\nPrivacy Protection: Handle crisis data with extreme sensitivity\nDon't Censor: Allow users to express distress (but provide help)\nHuman Follow-up: Automated detection + human outreach\nCultural Sensitivity: Respect cultural differences in expression\nContinuous Monitoring: Track at-risk users over time\nStaff Training: Safety team needs mental health training\nNever Ignore: Every detection requires action\nDocument Everything: Confidential logging for legal protection"
      },
      {
        "title": "Legal & Ethical Considerations",
        "body": "Duty of care to users in crisis\nBalance privacy with safety\nClear terms of service about crisis intervention\nStaff training and support\nCoordination with local emergency services\nLiability protection for good-faith interventions\nConfidential handling of sensitive data\nInformed consent where possible"
      },
      {
        "title": "Support for Your Team",
        "body": "Working with crisis detection is emotionally demanding:\n\nRegular mental health support for safety team\nClear escalation procedures\nShared responsibility (not one person's burden)\nCelebrate positive outcomes\nDebrief after difficult cases\nKnow when to involve professionals\nTeam support and peer counseling"
      }
    ],
    "body": "Crisis Detector\nMetadata\nID: crisis-detector\nVersion: 1.0.0\nCategory: safety\nPriority: Critical\nInstallation: npm\nPackage: @raghulpasupathi/crisis-detector\nDescription\n\nLife-saving crisis detection system for identifying self-harm, suicide ideation, and mental health emergencies. Uses NLP, sentiment analysis, and behavioral patterns to detect users in crisis and connect them with immediate help resources.\n\nFeatures\nSuicide Ideation Detection: Identify expressions of suicidal thoughts\nSelf-harm Detection: Detect mentions of self-injury or harm\nCrisis Severity Scoring: Assess urgency level (low, medium, high, critical)\nIntent Classification: Distinguish between ideation, planning, and imminent risk\nHistorical Analysis: Track user patterns over time\nResource Connection: Automatic crisis resource provision\nEmergency Contact: Alert designated contacts or authorities\nReal-time Monitoring: Continuous monitoring of at-risk users\nMulti-language Support: 20+ languages for global coverage\nEmpathetic Response: Compassionate automated messaging\nInstallation\nVia ClawHub\nhttps://clawhub.ai/raghulpasupathi/crisis-detector\n\nVia npm\nnpm install @raghulpasupathi/crisis-detector\n\nConfiguration\n{\n  \"enabled\": true,\n  \"settings\": {\n    \"detectionMode\": \"sensitive\",\n    \"modes\": {\n      \"sensitive\": {\n        \"ideation\": 0.40,\n        \"planning\": 0.30,\n        \"imminent\": 0.20,\n        \"selfHarm\": 0.35,\n        \"actionThreshold\": 0.40\n      },\n      \"moderate\": {\n        \"ideation\": 0.60,\n        \"planning\": 0.50,\n        \"imminent\": 0.40,\n        \"selfHarm\": 0.55,\n        \"actionThreshold\": 0.60\n      },\n      \"conservative\": {\n        \"ideation\": 0.75,\n        \"planning\": 0.65,\n        \"imminent\": 0.55,\n        \"selfHarm\": 0.70,\n        \"actionThreshold\": 0.75\n      }\n    },\n    \"detection\": {\n      \"textAnalysis\": {\n        \"enabled\": true,\n        \"contextAware\": true,\n        \"historicalContext\": true\n      },\n      \"behaviorAnalysis\": {\n        \"enabled\": true,\n        \"trackPatterns\": true,\n        \"abnormalityDetection\": true\n      },\n      \"sentimentAnalysis\": {\n        \"enabled\": true,\n        \"depressionIndicators\": true,\n        \"hopelessnessDetection\": true\n      }\n    },\n    \"resources\": {\n      \"crisisHotlines\": {\n        \"enabled\": true,\n        \"international\": true,\n        \"textServices\": true,\n        \"chatServices\": true\n      },\n      \"mentalHealthResources\": {\n        \"enabled\": true,\n        \"therapistDirectory\": true,\n        \"selfHelpResources\": true,\n        \"supportGroups\": true\n      },\n      \"emergencyServices\": {\n        \"enabled\": true,\n        \"localEmergency\": true,\n        \"wellnessCheck\": false\n      }\n    },\n    \"response\": {\n      \"automaticMessage\": true,\n      \"messageTemplate\": \"caring\",\n      \"resourceDisplay\": \"immediate\",\n      \"followUp\": true,\n      \"humanOutreach\": true\n    },\n    \"actions\": {\n      \"onIdeation\": [\n        \"show_resources\",\n        \"send_caring_message\",\n        \"notify_safety_team\",\n        \"enable_monitoring\"\n      ],\n      \"onPlanning\": [\n        \"show_resources_urgent\",\n        \"immediate_outreach\",\n        \"notify_emergency_contacts\",\n        \"enable_intensive_monitoring\"\n      ],\n      \"onImminent\": [\n        \"emergency_intervention\",\n        \"contact_authorities\",\n        \"notify_emergency_contacts\",\n        \"continuous_monitoring\"\n      ]\n    },\n    \"privacy\": {\n      \"respectUserPrivacy\": true,\n      \"informedConsent\": true,\n      \"dataMinimization\": true,\n      \"confidentialLogging\": true\n    },\n    \"languages\": [\"en\", \"es\", \"fr\", \"de\", \"pt\", \"it\", \"ja\", \"ko\", \"zh\", \"ar\", \"hi\", \"ru\"]\n  }\n}\n\nAPI / Methods\nconst CrisisDetector = require('@raghulpasupathi/crisis-detector');\n\n// Initialize detector\nconst detector = new CrisisDetector({\n  detectionMode: 'sensitive',\n  enableResources: true\n});\n\n// Analyze text for crisis signals\nconst result = await detector.analyze('I don\\'t want to be here anymore...');\nconsole.log(result);\n/* Output:\n{\n  isCrisis: true,\n  severity: 'high',\n  urgency: 'immediate',\n  confidence: 0.87,\n  categories: {\n    suicideIdeation: 0.89,\n    selfHarm: 0.12,\n    depression: 0.78,\n    hopelessness: 0.82\n  },\n  intent: {\n    type: 'ideation',\n    planning: false,\n    imminent: false,\n    meansIdentified: false\n  },\n  riskLevel: 'high',\n  indicators: [\n    { type: 'suicide_ideation', phrase: \"don't want to be here\", confidence: 0.91 },\n    { type: 'hopelessness', phrase: \"anymore\", confidence: 0.75 }\n  ],\n  sentiment: {\n    overall: -0.85,\n    depression: 0.82,\n    anxiety: 0.45,\n    hopelessness: 0.88\n  },\n  recommendedAction: 'immediate_intervention',\n  resources: {\n    crisisHotlines: [\n      {\n        name: 'National Suicide Prevention Lifeline',\n        phone: '988',\n        text: 'Text HOME to 741741',\n        chat: 'https://suicidepreventionlifeline.org/chat/',\n        available: '24/7'\n      },\n      {\n        name: 'Crisis Text Line',\n        text: 'Text HELLO to 741741',\n        available: '24/7'\n      }\n    ],\n    emergencyServices: {\n      call: '911',\n      text: 'Text 911 (where available)'\n    }\n  },\n  suggestedMessage: \"I'm concerned about you. You're not alone, and there are people who can help. Please reach out to the National Suicide Prevention Lifeline at 988 - they're available 24/7 and want to support you.\",\n  timestamp: '2026-02-20T10:30:00Z'\n}\n*/\n\n// Quick crisis check\nconst isCrisis = await detector.isCrisis('Some text to check');\n\n// Assess crisis severity\nconst severity = await detector.assessSeverity('Text expressing distress');\nconsole.log(severity);\n/* Output:\n{\n  level: 'high',\n  score: 0.84,\n  urgency: 'immediate',\n  riskFactors: [\n    'suicide_ideation',\n    'hopelessness',\n    'social_isolation'\n  ],\n  protectiveFactors: [\n    'help_seeking'\n  ]\n}\n*/\n\n// Detect specific crisis types\nconst suicideRisk = await detector.detectSuicideRisk('Text to analyze');\nconst selfHarmRisk = await detector.detectSelfHarmRisk('Text to analyze');\n\n// Analyze user behavior patterns\nconst behaviorAnalysis = await detector.analyzeUserBehavior(userId, {\n  recentMessages: messages,\n  activityChanges: activityData,\n  timeRange: '30d'\n});\nconsole.log(behaviorAnalysis);\n/* Output:\n{\n  concernLevel: 'elevated',\n  patterns: [\n    {\n      type: 'social_withdrawal',\n      detected: true,\n      confidence: 0.78,\n      description: 'Decreased interaction frequency by 65%'\n    },\n    {\n      type: 'negative_content_increase',\n      detected: true,\n      confidence: 0.82,\n      description: 'Increased negative sentiment in 78% of recent posts'\n    },\n    {\n      type: 'activity_time_change',\n      detected: true,\n      confidence: 0.71,\n      description: 'Shift to late-night activity (12am-4am)'\n    }\n  ],\n  riskLevel: 'moderate-high',\n  recommendation: 'enhanced_monitoring'\n}\n*/\n\n// Get crisis resources for location\nconst resources = await detector.getResources({\n  country: 'US',\n  state: 'CA',\n  language: 'en',\n  services: ['crisis_hotline', 'text_line', 'chat_support']\n});\n\n// Generate empathetic response\nconst response = await detector.generateResponse({\n  severity: 'high',\n  intent: 'ideation',\n  includeResources: true,\n  tone: 'caring'\n});\n\n// Track at-risk user\nawait detector.trackUser(userId, {\n  riskLevel: 'high',\n  monitoringIntensity: 'enhanced',\n  alertContacts: true\n});\n\n// Alert safety team\nawait detector.alertSafetyTeam({\n  userId: userId,\n  severity: 'critical',\n  analysis: analysisResult,\n  requiresImmediate: true\n});\n\n// Send caring outreach\nawait detector.sendOutreach(userId, {\n  type: 'caring_message',\n  includeResources: true,\n  fromHuman: true\n});\n\n// Check user status\nconst status = await detector.getUserStatus(userId);\nconsole.log(status);\n/* Output:\n{\n  userId: 'user-123',\n  currentRiskLevel: 'moderate',\n  monitoringStatus: 'enhanced',\n  lastCrisisDetection: '2026-02-18T14:30:00Z',\n  outreachAttempts: 2,\n  resourcesProvided: true,\n  emergencyContactNotified: false,\n  trend: 'stable'\n}\n*/\n\n// Event listeners\ndetector.on('crisisDetected', async (crisis) => {\n  console.warn('⚠️ CRISIS DETECTED:', crisis);\n\n  // Immediate response\n  await detector.sendOutreach(crisis.userId, {\n    severity: crisis.severity,\n    resources: crisis.resources\n  });\n\n  // Notify safety team\n  await detector.alertSafetyTeam(crisis);\n});\n\ndetector.on('imminentRisk', async (risk) => {\n  console.error('🚨 IMMINENT RISK DETECTED');\n\n  // Emergency intervention\n  await detector.executeEmergencyProtocol(risk);\n\n  // Consider wellness check\n  if (risk.severity === 'critical') {\n    await detector.considerWellnessCheck(risk);\n  }\n});\n\ndetector.on('improvementDetected', (update) => {\n  console.log('✓ User showing improvement:', update);\n\n  // Continue support\n  await detector.sendEncouragement(update.userId);\n});\n\n// Performance stats\nconst stats = detector.getStats();\nconsole.log(stats);\n/* Output:\n{\n  totalAnalyses: 100000,\n  crisisDetected: 1250,\n  bySeverity: {\n    low: 400,\n    moderate: 550,\n    high: 250,\n    critical: 50\n  },\n  interventions: 1250,\n  resourcesProvided: 1250,\n  emergencyContacts: 75,\n  positiveOutcomes: 980,\n  averageResponseTime: '45s'\n}\n*/\n\nDependencies\ntransformers.js: ^2.6.0 - Sentiment and intent analysis\nnatural: ^6.0.0 - NLP processing\ncompromise: ^14.0.0 - Text understanding\nsentiment: ^5.0.0 - Sentiment analysis\nfranc-min: ^6.0.0 - Language detection\nPerformance\nAnalysis Speed: 30-80ms per text\nAccuracy:\nSuicide ideation: 91% detection rate\nSelf-harm: 88% detection rate\nCrisis severity: 87% accuracy\nFalse positive rate: 8-12% (intentionally higher for safety)\nResponse Time: <1 minute for outreach\nCrisis Hotlines (Global)\nUSA: 988 Suicide & Crisis Lifeline\nUK: 116 123 (Samaritans)\nCanada: 1-833-456-4566\nAustralia: 13 11 14 (Lifeline)\nInternational: befrienders.org\nUse Cases\nSocial media platforms\nMental health apps\nGaming communities\nDating applications\nStudent portals\nEmployee wellness platforms\nSupport forums\nChat applications\nAny platform with user communication\nTroubleshooting\nHigh False Positive Rate\n\nProblem: Too many non-crisis messages flagged Solution:\n\nSwitch to 'moderate' or 'conservative' mode\nEnable more context analysis\nCheck for sarcasm/humor detection\nReview historical user behavior\nAdjust thresholds upward\nBetter to err on side of caution - false positives are acceptable\nMissing Crisis Signals\n\nProblem: Not detecting users in genuine crisis Solution:\n\nSwitch to 'sensitive' mode\nLower detection thresholds\nEnable behavioral analysis\nCheck language support\nReview flagged phrases database\nReport false negatives for model improvement\nThis is more serious than false positives\nUser Privacy Concerns\n\nProblem: Users concerned about monitoring Solution:\n\nClear privacy policy disclosure\nExplain life-saving purpose\nOffer opt-in for enhanced monitoring\nData minimization practices\nConfidential handling of sensitive data\nBalance privacy with safety\nResource Availability\n\nProblem: Crisis resources not available in user's location Solution:\n\nExpand international hotline database\nInclude online chat/text services\nProvide self-help resources\nConnect to community support\nPartner with local organizations\nAlways provide emergency services number\nResponse Effectiveness\n\nProblem: Unsure if interventions are helping Solution:\n\nTrack user engagement with resources\nMonitor user behavior after intervention\nFollow-up outreach after 24-48 hours\nCollect feedback when appropriate\nPartner with mental health professionals\nContinuous improvement of messaging\nIntegration Example\n// Complete platform integration\nconst express = require('express');\nconst CrisisDetector = require('@raghulpasupathi/crisis-detector');\n\nconst app = express();\nconst detector = new CrisisDetector({ detectionMode: 'sensitive' });\n\n// At-risk user tracking\nconst atRiskUsers = new Map();\n\n// Monitor all user-generated content\napp.post('/api/posts/create', async (req, res) => {\n  try {\n    const { userId, content } = req.body;\n\n    // Analyze for crisis signals\n    const analysis = await detector.analyze(content);\n\n    if (analysis.isCrisis) {\n      console.warn(`⚠️ Crisis detected for user ${userId}`);\n\n      // Log for safety team (confidential)\n      await logCrisisEvent(userId, analysis);\n\n      // Immediate response based on severity\n      if (analysis.severity === 'critical' || analysis.intent.imminent) {\n        // CRITICAL: Imminent risk\n        console.error(`🚨 IMMINENT RISK: User ${userId}`);\n\n        // Show resources immediately\n        await showEmergencyResources(userId, analysis.resources);\n\n        // Alert safety team for immediate outreach\n        await alertSafetyTeam({\n          userId: userId,\n          severity: 'critical',\n          analysis: analysis,\n          urgent: true\n        });\n\n        // Consider emergency services\n        if (analysis.riskLevel === 'critical') {\n          await considerEmergencyServices(userId, analysis);\n        }\n\n        // Track intensively\n        await detector.trackUser(userId, {\n          riskLevel: 'critical',\n          monitoringIntensity: 'maximum'\n        });\n      } else if (analysis.severity === 'high') {\n        // High risk: Immediate support\n        await sendCaringMessage(userId, {\n          message: analysis.suggestedMessage,\n          resources: analysis.resources\n        });\n\n        await alertSafetyTeam({\n          userId: userId,\n          severity: 'high',\n          analysis: analysis,\n          urgent: false\n        });\n\n        await detector.trackUser(userId, {\n          riskLevel: 'high',\n          monitoringIntensity: 'enhanced'\n        });\n      } else {\n        // Moderate risk: Show resources\n        await showSupportResources(userId, analysis.resources);\n\n        await detector.trackUser(userId, {\n          riskLevel: 'moderate',\n          monitoringIntensity: 'standard'\n        });\n      }\n\n      // Update tracking\n      atRiskUsers.set(userId, {\n        lastDetection: new Date(),\n        severity: analysis.severity,\n        analysis: analysis\n      });\n    }\n\n    // Allow post (don't censor crisis content)\n    const post = await createPost(userId, content);\n\n    res.json({\n      success: true,\n      postId: post.id\n    });\n  } catch (error) {\n    res.status(500).json({ error: error.message });\n  }\n});\n\n// Background monitoring of at-risk users\nsetInterval(async () => {\n  for (const [userId, tracking] of atRiskUsers) {\n    // Check recent activity\n    const behavior = await detector.analyzeUserBehavior(userId, {\n      timeRange: '24h'\n    });\n\n    if (behavior.concernLevel === 'elevated') {\n      // Send follow-up\n      await sendFollowUp(userId, {\n        type: 'check_in',\n        resources: true\n      });\n    } else if (behavior.concernLevel === 'improved') {\n      // Positive trend\n      await sendEncouragement(userId);\n\n      // Reduce monitoring intensity\n      await detector.trackUser(userId, {\n        riskLevel: 'low',\n        monitoringIntensity: 'minimal'\n      });\n    }\n  }\n}, 6 * 60 * 60 * 1000); // Every 6 hours\n\n// User dashboard - show resources\napp.get('/api/mental-health/resources', async (req, res) => {\n  const userId = req.user.id;\n\n  const resources = await detector.getResources({\n    country: req.user.country,\n    language: req.user.language\n  });\n\n  res.json({\n    success: true,\n    resources: resources,\n    message: 'You\\'re not alone. Help is available 24/7.'\n  });\n});\n\n// Safety team dashboard\napp.get('/admin/safety/crisis-monitor', requireSafetyTeam, async (req, res) => {\n  const activeAlerts = await getActiveCrisisAlerts();\n\n  // Add current status for each\n  const enriched = await Promise.all(\n    activeAlerts.map(async alert => {\n      const status = await detector.getUserStatus(alert.userId);\n      return { ...alert, status };\n    })\n  );\n\n  res.json({\n    success: true,\n    alerts: enriched,\n    count: enriched.length\n  });\n});\n\n// Helper functions\nasync function sendCaringMessage(userId, { message, resources }) {\n  await sendNotification(userId, {\n    title: 'We\\'re here for you',\n    body: message,\n    action: {\n      text: 'Get help now',\n      url: '/mental-health/resources'\n    },\n    priority: 'high'\n  });\n\n  // Log outreach attempt\n  await logOutreach(userId, 'caring_message');\n}\n\nasync function considerEmergencyServices(userId, analysis) {\n  // This is a sensitive decision requiring human judgment\n  await alertSafetyTeam({\n    userId: userId,\n    message: 'CRITICAL: Consider wellness check',\n    analysis: analysis,\n    requiresDecision: true\n  });\n}\n\nBest Practices (CRITICAL)\nErr on Side of Caution: False positives acceptable, false negatives are not\nImmediate Response: Respond within minutes, not hours\nEmpathetic Messaging: Use caring, non-judgmental language\nResource Accessibility: Make help easy to access\nPrivacy Protection: Handle crisis data with extreme sensitivity\nDon't Censor: Allow users to express distress (but provide help)\nHuman Follow-up: Automated detection + human outreach\nCultural Sensitivity: Respect cultural differences in expression\nContinuous Monitoring: Track at-risk users over time\nStaff Training: Safety team needs mental health training\nNever Ignore: Every detection requires action\nDocument Everything: Confidential logging for legal protection\nLegal & Ethical Considerations\nDuty of care to users in crisis\nBalance privacy with safety\nClear terms of service about crisis intervention\nStaff training and support\nCoordination with local emergency services\nLiability protection for good-faith interventions\nConfidential handling of sensitive data\nInformed consent where possible\nSupport for Your Team\n\nWorking with crisis detection is emotionally demanding:\n\nRegular mental health support for safety team\nClear escalation procedures\nShared responsibility (not one person's burden)\nCelebrate positive outcomes\nDebrief after difficult cases\nKnow when to involve professionals\nTeam support and peer counseling"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/raghulpasupathi/crisis-detector",
    "publisherUrl": "https://clawhub.ai/raghulpasupathi/crisis-detector",
    "owner": "raghulpasupathi",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/crisis-detector",
    "downloadUrl": "https://openagent3.xyz/downloads/crisis-detector",
    "agentUrl": "https://openagent3.xyz/skills/crisis-detector/agent",
    "manifestUrl": "https://openagent3.xyz/skills/crisis-detector/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/crisis-detector/agent.md"
  }
}