{
  "schemaVersion": "1.0",
  "item": {
    "slug": "openclaw-voice-assistant",
    "name": "Voice Assistant",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/kurtivy/openclaw-voice-assistant",
    "canonicalUrl": "https://clawhub.ai/kurtivy/openclaw-voice-assistant",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/openclaw-voice-assistant",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=openclaw-voice-assistant",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "references/architecture.md",
      "references/troubleshooting.md",
      "scripts/generate_assets.py",
      "scripts/generate_chime_sounds.py",
      "scripts/generate_thinking_sounds.py",
      "scripts/requirements.txt"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=openclaw-voice-assistant",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=openclaw-voice-assistant",
        "contentDisposition": "attachment; filename=\"openclaw-voice-assistant-1.0.4.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/openclaw-voice-assistant"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/openclaw-voice-assistant",
    "agentPageUrl": "https://openagent3.xyz/skills/openclaw-voice-assistant/agent",
    "manifestUrl": "https://openagent3.xyz/skills/openclaw-voice-assistant/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/openclaw-voice-assistant/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Voice Assistant for OpenClaw",
        "body": "A Python companion app that gives OpenClaw a voice. Say a wake word (or press a\nhotkey), speak naturally, and hear the AI respond — then keep talking for\nmulti-turn conversation.\n\nMic → Porcupine wake word → faster-whisper STT → OpenClaw Gateway → ElevenLabs TTS → Speaker"
      },
      {
        "title": "Quick Start",
        "body": "# 1. Navigate to the skill scripts\ncd {baseDir}/scripts\n\n# 2. Create a virtual environment and install dependencies\npython -m venv venv\nvenv\\Scripts\\pip install -r requirements.txt\n\n# 3. Copy .env.example to .env and fill in your keys\ncopy .env.example .env\n\n# 4. Run the assistant\nvenv\\Scripts\\python src\\assistant.py"
      },
      {
        "title": "Requirements",
        "body": "Service\tWhat you need\tCost\nOpenClaw gateway\tRunning locally on ws://127.0.0.1:18789 with a gateway token\t—\nElevenLabs\tAPI key + voice ID (free tier works with default voices)\tFree+\nPicovoice\tAccess key from picovoice.ai (free tier works)\tFree\nPython\t3.10+ (tested on 3.14)\t—\nMicrophone\tAny input device\t—"
      },
      {
        "title": "Configuration (.env)",
        "body": "# OpenClaw Gateway\nGATEWAY_URL=ws://127.0.0.1:18789\nGATEWAY_TOKEN=your-gateway-token\n\n# ElevenLabs TTS\nELEVENLABS_API_KEY=your-api-key\nELEVENLABS_VOICE_ID=XrExE9yKIg1WjnnlVkGX  # Matilda (free tier) — or MClEFoImJXBTgLwdLI5n for Ivy (paid)\nELEVENLABS_MODEL_ID=eleven_v3\n\n# Porcupine Wake Word\nPORCUPINE_ACCESS_KEY=your-access-key\nPORCUPINE_MODEL_PATH=              # path to custom .ppn file (optional)\n\n# Whisper STT\nWHISPER_MODEL=base                  # tiny, base, small, medium, large\n\n# Tuning\nWAKE_SENSITIVITY=0.7               # 0.0–1.0 (higher = more sensitive)\nSILENCE_TIMEOUT=1.5                # seconds of silence to stop recording\nHOTKEY=ctrl+shift+k                # global keyboard shortcut"
      },
      {
        "title": "Custom Wake Word",
        "body": "Go to Picovoice Console\nCreate a custom wake word (e.g. \"Hey Claudia\", \"Hey OpenClaw\")\nDownload the .ppn file for your platform\nSet PORCUPINE_MODEL_PATH in .env to the file path\nWithout a custom model, falls back to built-in \"hey google\""
      },
      {
        "title": "Personalized Voice Sounds",
        "body": "The assistant plays short audio clips when activated (\"Yep!\", \"Hi!\") and while\nthinking (\"Hmm...\", \"Let me think...\"). Generate these in your chosen ElevenLabs\nvoice:\n\ncd {baseDir}/scripts\nvenv\\Scripts\\python generate_chime_sounds.py\nvenv\\Scripts\\python generate_thinking_sounds.py\n\nRe-run these after changing ELEVENLABS_VOICE_ID."
      },
      {
        "title": "Running in Background",
        "body": "Use start.bat to launch without a console window (runs via pythonw.exe).\nThe assistant appears as a system tray icon with Pause/Resume/Quit controls.\n\nFor auto-start on Windows, create a shortcut to start.bat in shell:startup."
      },
      {
        "title": "How It Works",
        "body": "Wake — Porcupine detects the wake word (or user presses hotkey)\nChime — Plays a random activation sound (\"Yep!\", \"Hi!\")\nRecord — Records speech until 1.5s of silence (2s grace period for initial silence)\nThinking — Plays a filler sound (\"Hmm...\", \"Let me think...\")\nTranscribe — faster-whisper converts audio to text locally (CPU, int8)\nGateway — Sends text to OpenClaw gateway via WebSocket, streams response\nSpeak — ElevenLabs converts response to speech, plays through speakers\nFollow-up — Automatically listens for 5s after speaking for conversation continuity\nIdle — Returns to wake word listening after 5s of silence\n\nMic suppression keeps the microphone muted during all speaker output to prevent\nfeedback loops."
      },
      {
        "title": "Detailed Architecture",
        "body": "See references/architecture.md for source file\nbreakdown, WebSocket protocol details, and audio pipeline internals."
      },
      {
        "title": "Troubleshooting",
        "body": "See references/troubleshooting.md for common\nissues with mic detection, gateway connection, TTS errors, and wake word tuning."
      }
    ],
    "body": "Voice Assistant for OpenClaw\n\nA Python companion app that gives OpenClaw a voice. Say a wake word (or press a hotkey), speak naturally, and hear the AI respond — then keep talking for multi-turn conversation.\n\nMic → Porcupine wake word → faster-whisper STT → OpenClaw Gateway → ElevenLabs TTS → Speaker\n\nQuick Start\n# 1. Navigate to the skill scripts\ncd {baseDir}/scripts\n\n# 2. Create a virtual environment and install dependencies\npython -m venv venv\nvenv\\Scripts\\pip install -r requirements.txt\n\n# 3. Copy .env.example to .env and fill in your keys\ncopy .env.example .env\n\n# 4. Run the assistant\nvenv\\Scripts\\python src\\assistant.py\n\nRequirements\nService\tWhat you need\tCost\nOpenClaw gateway\tRunning locally on ws://127.0.0.1:18789 with a gateway token\t—\nElevenLabs\tAPI key + voice ID (free tier works with default voices)\tFree+\nPicovoice\tAccess key from picovoice.ai (free tier works)\tFree\nPython\t3.10+ (tested on 3.14)\t—\nMicrophone\tAny input device\t—\nConfiguration (.env)\n# OpenClaw Gateway\nGATEWAY_URL=ws://127.0.0.1:18789\nGATEWAY_TOKEN=your-gateway-token\n\n# ElevenLabs TTS\nELEVENLABS_API_KEY=your-api-key\nELEVENLABS_VOICE_ID=XrExE9yKIg1WjnnlVkGX  # Matilda (free tier) — or MClEFoImJXBTgLwdLI5n for Ivy (paid)\nELEVENLABS_MODEL_ID=eleven_v3\n\n# Porcupine Wake Word\nPORCUPINE_ACCESS_KEY=your-access-key\nPORCUPINE_MODEL_PATH=              # path to custom .ppn file (optional)\n\n# Whisper STT\nWHISPER_MODEL=base                  # tiny, base, small, medium, large\n\n# Tuning\nWAKE_SENSITIVITY=0.7               # 0.0–1.0 (higher = more sensitive)\nSILENCE_TIMEOUT=1.5                # seconds of silence to stop recording\nHOTKEY=ctrl+shift+k                # global keyboard shortcut\n\nCustom Wake Word\nGo to Picovoice Console\nCreate a custom wake word (e.g. \"Hey Claudia\", \"Hey OpenClaw\")\nDownload the .ppn file for your platform\nSet PORCUPINE_MODEL_PATH in .env to the file path\nWithout a custom model, falls back to built-in \"hey google\"\nPersonalized Voice Sounds\n\nThe assistant plays short audio clips when activated (\"Yep!\", \"Hi!\") and while thinking (\"Hmm...\", \"Let me think...\"). Generate these in your chosen ElevenLabs voice:\n\ncd {baseDir}/scripts\nvenv\\Scripts\\python generate_chime_sounds.py\nvenv\\Scripts\\python generate_thinking_sounds.py\n\n\nRe-run these after changing ELEVENLABS_VOICE_ID.\n\nRunning in Background\n\nUse start.bat to launch without a console window (runs via pythonw.exe). The assistant appears as a system tray icon with Pause/Resume/Quit controls.\n\nFor auto-start on Windows, create a shortcut to start.bat in shell:startup.\n\nHow It Works\nWake — Porcupine detects the wake word (or user presses hotkey)\nChime — Plays a random activation sound (\"Yep!\", \"Hi!\")\nRecord — Records speech until 1.5s of silence (2s grace period for initial silence)\nThinking — Plays a filler sound (\"Hmm...\", \"Let me think...\")\nTranscribe — faster-whisper converts audio to text locally (CPU, int8)\nGateway — Sends text to OpenClaw gateway via WebSocket, streams response\nSpeak — ElevenLabs converts response to speech, plays through speakers\nFollow-up — Automatically listens for 5s after speaking for conversation continuity\nIdle — Returns to wake word listening after 5s of silence\n\nMic suppression keeps the microphone muted during all speaker output to prevent feedback loops.\n\nDetailed Architecture\n\nSee references/architecture.md for source file breakdown, WebSocket protocol details, and audio pipeline internals.\n\nTroubleshooting\n\nSee references/troubleshooting.md for common issues with mic detection, gateway connection, TTS errors, and wake word tuning."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/kurtivy/openclaw-voice-assistant",
    "publisherUrl": "https://clawhub.ai/kurtivy/openclaw-voice-assistant",
    "owner": "kurtivy",
    "version": "1.0.4",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/openclaw-voice-assistant",
    "downloadUrl": "https://openagent3.xyz/downloads/openclaw-voice-assistant",
    "agentUrl": "https://openagent3.xyz/skills/openclaw-voice-assistant/agent",
    "manifestUrl": "https://openagent3.xyz/skills/openclaw-voice-assistant/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/openclaw-voice-assistant/agent.md"
  }
}