{
  "schemaVersion": "1.0",
  "item": {
    "slug": "code2animation",
    "name": "code2animation",
    "source": "tencent",
    "type": "skill",
    "category": "内容创作",
    "sourceUrl": "https://clawhub.ai/etrobot/code2animation",
    "canonicalUrl": "https://clawhub.ai/etrobot/code2animation",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/code2animation",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=code2animation",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "pnpm-lock.yaml",
      "index.html",
      "metadata.json",
      "README.md",
      "package.json",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "code2animation",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-01T02:24:05.944Z",
      "expiresAt": "2026-05-08T02:24:05.944Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=code2animation",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=code2animation",
        "contentDisposition": "attachment; filename=\"code2animation-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "code2animation"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/code2animation"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/code2animation",
    "agentPageUrl": "https://openagent3.xyz/skills/code2animation/agent",
    "manifestUrl": "https://openagent3.xyz/skills/code2animation/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/code2animation/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "AgentSaaS Video Editor Skill",
        "body": "A comprehensive video editing and rendering skill that enables AI agents to create code-driven animations with text-to-speech narration and smooth transitions."
      },
      {
        "title": "Purpose",
        "body": "This skill allows agents to:\n\nCreate and preview interactive video projects with animations\nGenerate TTS audio narration using Microsoft Edge TTS\nRender complete videos with synchronized audio and visual effects\nSupport both portrait and landscape video formats\nApply smooth transition effects between media elements"
      },
      {
        "title": "1. Interactive Video Preview",
        "body": "Real-time preview of video projects in the browser\nPlayback controls for testing and debugging\nSupport for transitions, media clips, and timing adjustments\nFrame-by-frame seeking for precise editing\nLive transition preview with easing effects"
      },
      {
        "title": "2. Transition System",
        "body": "transitionIn: Each media defines its own entrance animation\nSupported transitions: fade, zoom, slide2Left, slideUp, none\nEasing: Built-in easeOutCubic for smooth slide and zoom animations\nstayInClip: Media can persist throughout entire clip duration\nCross-clip transitions: Automatic handling of clip boundaries"
      },
      {
        "title": "3. TTS Audio Generation",
        "body": "Automated text-to-speech using Microsoft Edge TTS (msedge-tts)\nSupport for multiple voices (English and Chinese)\nWord-level timing metadata for lip-sync and animations\nAudio file caching for faster previews"
      },
      {
        "title": "4. Video Rendering",
        "body": "Automated frame-by-frame rendering using Puppeteer\nFFmpeg integration for video encoding and audio mixing\n30 FPS output at 1920x1080 (landscape) or 1080x1920 (portrait)\nDeterministic rendering for consistent results\nTransition effects preserved in final output"
      },
      {
        "title": "Media Item Properties",
        "body": "src: HTML filename in the footage directory\nwords: Trigger phrase from speech that activates this media\ntransitionIn: Entrance animation type (optional)\ntransitionDuration: Duration in seconds (optional, default: 0.6s)\nstayInClip: If true, media remains visible until clip ends (optional)"
      },
      {
        "title": "Transition Types",
        "body": "fade: Opacity transition (0 → 1)\nzoom: Scale transition (2x → 1x) with opacity\nslide2Left: Horizontal slide from right (100% → 0%)\nslideUp: Vertical slide from bottom (100% → 0%)\nnone: No transition effect"
      },
      {
        "title": "Transition Behavior",
        "body": "transitionIn: Defines how this media enters the scene\ntransitionDuration: Duration in seconds (default: 0.6s)\nstayInClip: If true, media remains visible until clip ends\nEasing: slide2Left and slideUp use easeOutCubic for smooth deceleration"
      },
      {
        "title": "Reference Implementation",
        "body": "See public/projects/agentSaasPromoVideo.json for a complete working example demonstrating all transition types and stayInClip behavior."
      },
      {
        "title": "System Dependencies",
        "body": "Node.js: 18 or higher\nFFmpeg: Required for video encoding and audio mixing\nChromium/Chrome: Used by Puppeteer for headless rendering"
      },
      {
        "title": "Node.js Dependencies",
        "body": "React & Vite: Frontend framework and build tool\nPuppeteer: Headless browser for frame capture\nmsedge-tts: Microsoft Edge TTS for audio generation\nExpress: Optional HTTP server (for TTS API endpoint)\nMotion (Framer Motion): Animation library\nTailwind CSS: Styling framework"
      },
      {
        "title": "Shell Commands Used",
        "body": "This skill executes the following shell commands:"
      },
      {
        "title": "Audio Generation",
        "body": "npx tsx scripts/generate-audio.ts <projectId>\n\nReads project JSON configuration\nGenerates MP3 audio files using Edge TTS\nSaves word-level timing metadata"
      },
      {
        "title": "Video Rendering",
        "body": "node scripts/render.js <projectId> [--portrait]\n\nStarts a local Vite dev server\nLaunches Puppeteer to capture frames\nUses FFmpeg to encode video and mix audio\nCleans up temporary files"
      },
      {
        "title": "FFmpeg Operations",
        "body": "Frame encoding: ffmpeg -framerate 30 -i frames/frame-%05d.jpg -c:v libx264 ...\nAudio mixing: ffmpeg -i video.mp4 -i audio1.mp3 -i audio2.mp3 -filter_complex ..."
      },
      {
        "title": "Browser Detection",
        "body": "Uses which command to find Chrome/Chromium on Linux/macOS\nRespects PUPPETEER_EXECUTABLE_PATH environment variable"
      },
      {
        "title": "API Endpoints (Optional)",
        "body": "The skill may expose an HTTP endpoint for TTS generation:\n\nPOST /api/tts\nContent-Type: application/json\n\n{\n  \"text\": \"Text to speak\",\n  \"voice\": \"en-US-GuyNeural\",\n  \"rate\": \"+0%\",\n  \"pitch\": \"+0Hz\"\n}\n\nThis endpoint is optional and only used when pre-generated audio files are not available."
      },
      {
        "title": "File System Access",
        "body": "Reads from: public/projects/<projectId>/\nWrites to: public/projects/<projectId>/audio/, public/video/\nCreates temporary directories for frame storage\nCleans up temporary files after rendering"
      },
      {
        "title": "Network Access",
        "body": "Starts local HTTP server on port 5175+ (configurable)\nConnects to Microsoft Edge TTS service (external)\nNo external API keys required for basic functionality"
      },
      {
        "title": "Process Execution",
        "body": "Spawns child processes for: Vite dev server, FFmpeg encoding\nUses execSync for: browser detection, audio generation trigger\nAll commands are predefined and not user-controllable"
      },
      {
        "title": "Environment Variables",
        "body": "Optional configuration:\n\nPUPPETEER_EXECUTABLE_PATH: Custom browser path for Puppeteer\nFASTMCP_LOG_LEVEL: Logging level (default: ERROR)"
      },
      {
        "title": "Project Structure",
        "body": "public/\n  projects/\n    <projectId>/\n      <projectId>.json       # Project configuration\n      footage/               # HTML/CSS media components\n      audio/                 # Generated TTS audio files\n        0.mp3, 1.mp3, ...\n        0.json, 1.json, ...  # Word timing metadata\n  video/\n    render-<projectId>-landscape.mp4\n    render-<projectId>-portrait.mp4"
      },
      {
        "title": "Usage Example",
        "body": "# 1. Generate audio for a project\npnpm generate-audio agentSaasPromoVideo\n\n# 2. Preview in browser\npnpm dev\n\n# 3. Render final video\npnpm render agentSaasPromoVideo\n\n# 4. Render portrait version\npnpm render agentSaasPromoVideo --portrait"
      },
      {
        "title": "HTML Animation Guidelines",
        "body": "When creating HTML animations for video rendering, use the CSS variable timeline model."
      },
      {
        "title": "Core Model",
        "body": "Renderer controls time: Puppeteer sets --t every frame.\nPage only renders state: DOM = f(t).\nNo lifecycle animation APIs (play/start/reset) and no hidden runtime state.\nTransition system handles entrance effects: Don't implement slide/fade transitions in HTML - use the project's transitionIn property instead."
      },
      {
        "title": "✅ Required Patterns",
        "body": "Define timeline root:\n:root { --t: 0; }\n\n\nEvery animated property must derive from --t.\nAlways clamp normalized progress values:\n--p: clamp(0, calc((var(--t) - var(--start)) / var(--duration)), 1);\n\n\nExpress initial/ending states directly in CSS (seek-safe at any frame).\nUse small deterministic JS only for content mapping (e.g., subtitle/text index from t).\nLet transition system handle entrance: Focus on content animation, not entrance effects."
      },
      {
        "title": "🚫 Forbidden Patterns",
        "body": "transition\nanimation / @keyframes\nwindow.registerFrameAnimation(...)\nrequestAnimationFrame loops for timeline progression\nImplicit time from Date.now() / performance.now() for visual state\nManual entrance transitions: Don't implement slide/fade in HTML - use transitionIn in project config\nFade-out effects: Elements should not disappear after animation completes. Use opacity: var(--p) instead of opacity: calc(var(--p) * (1 - var(--fade))) to keep elements visible at their final state."
      },
      {
        "title": "Recommended Template",
        "body": ".element {\n  --start: 0.5;\n  --duration: 1;\n  --p: clamp(0, calc((var(--t) - var(--start)) / var(--duration)), 1);\n\n  opacity: var(--p);\n  transform: translateY(calc((1 - var(--p)) * 20px));\n}"
      },
      {
        "title": "Easing (without transition)",
        "body": "Use math on progress directly:\n\n--p: clamp(0, calc((var(--t) - var(--start)) / var(--duration)), 1);\n--ease-out: calc(1 - (1 - var(--p)) * (1 - var(--p)));\nopacity: var(--ease-out);"
      },
      {
        "title": "JS Hook Pattern (text/content only)",
        "body": "<script>\n  const labels = ['A', 'B', 'C'];\n  const el = document.getElementById('label');\n\n  window.onTimelineUpdate = (t) => {\n    const idx = Math.floor(Math.max(0, t) / 1.2) % labels.length;\n    el.textContent = labels[idx];\n  };\n</script>"
      },
      {
        "title": "Time Semantics (t vs globalTime)",
        "body": "onTimelineUpdate(t, globalTime) supports two time domains:\n\nt: clip-local time (resets to 0 when clip changes). This is the default for most HTML animations.\nglobalTime: continuous timeline across clips. Use only when an element must stay continuous through cross-clip transitions.\n\n\nDo not assume t is media-local. If a media appears mid-clip, t may already be large when it first becomes visible.\nFor media-local behavior (e.g., toggle starts animating when this media appears), anchor from first visible globalTime and derive:\n\nlocal = globalTime - mediaStartGlobalTime\n\n\nKeep fallback for compatibility:\nwindow.onTimelineUpdate = (t, globalTime) => {\n  const g = Number.isFinite(globalTime) ? globalTime : t;\n  // use `t` for normal clip-local animation, `g` only when continuity is required\n};"
      },
      {
        "title": "Determinism Checklist",
        "body": "Seeking to any t yields exactly one deterministic frame.\nAnimation state must not depend on \"previous frame\".\nCross-clip transition visuals should be continuous in both clips.\nFinal frame (t = totalDuration) must remain on the last clip (no wrap to first clip)."
      },
      {
        "title": "Limitations",
        "body": "Requires FFmpeg to be installed on the system\nTTS generation requires internet connection (Microsoft Edge TTS)\nRendering is CPU-intensive and may take several minutes\nMaximum TTS text length: ~1000 characters per clip\nFrame capture requires sufficient disk space"
      },
      {
        "title": "Transparency Statement",
        "body": "This skill executes shell commands and spawns child processes for video rendering. All operations are limited to:\n\nStarting a local development server (Vite)\nRunning FFmpeg for video encoding\nLaunching Puppeteer for frame capture\nDetecting browser executables on the system\n\nNo arbitrary code execution or user input is passed to shell commands. All file paths and commands are predefined and validated."
      }
    ],
    "body": "AgentSaaS Video Editor Skill\n\nA comprehensive video editing and rendering skill that enables AI agents to create code-driven animations with text-to-speech narration and smooth transitions.\n\nPurpose\n\nThis skill allows agents to:\n\nCreate and preview interactive video projects with animations\nGenerate TTS audio narration using Microsoft Edge TTS\nRender complete videos with synchronized audio and visual effects\nSupport both portrait and landscape video formats\nApply smooth transition effects between media elements\nCore Capabilities\n1. Interactive Video Preview\nReal-time preview of video projects in the browser\nPlayback controls for testing and debugging\nSupport for transitions, media clips, and timing adjustments\nFrame-by-frame seeking for precise editing\nLive transition preview with easing effects\n2. Transition System\ntransitionIn: Each media defines its own entrance animation\nSupported transitions: fade, zoom, slide2Left, slideUp, none\nEasing: Built-in easeOutCubic for smooth slide and zoom animations\nstayInClip: Media can persist throughout entire clip duration\nCross-clip transitions: Automatic handling of clip boundaries\n3. TTS Audio Generation\nAutomated text-to-speech using Microsoft Edge TTS (msedge-tts)\nSupport for multiple voices (English and Chinese)\nWord-level timing metadata for lip-sync and animations\nAudio file caching for faster previews\n4. 
Video Rendering\nAutomated frame-by-frame rendering using Puppeteer\nFFmpeg integration for video encoding and audio mixing\n30 FPS output at 1920x1080 (landscape) or 1080x1920 (portrait)\nDeterministic rendering for consistent results\nTransition effects preserved in final output\nProject Configuration Format\nMedia Item Properties\nsrc: HTML filename in the footage directory\nwords: Trigger phrase from speech that activates this media\ntransitionIn: Entrance animation type (optional)\ntransitionDuration: Duration in seconds (optional, default: 0.6s)\nstayInClip: If true, media remains visible until clip ends (optional)\nTransition Types\nfade: Opacity transition (0 → 1)\nzoom: Scale transition (2x → 1x) with opacity\nslide2Left: Horizontal slide from right (100% → 0%)\nslideUp: Vertical slide from bottom (100% → 0%)\nnone: No transition effect\nTransition Behavior\ntransitionIn: Defines how this media enters the scene\ntransitionDuration: Duration in seconds (default: 0.6s)\nstayInClip: If true, media remains visible until clip ends\nEasing: slide2Left and slideUp use easeOutCubic for smooth deceleration\nReference Implementation\n\nSee public/projects/agentSaasPromoVideo.json for a complete working example demonstrating all transition types and stayInClip behavior.\n\nTechnical Requirements\nSystem Dependencies\nNode.js: 18 or higher\nFFmpeg: Required for video encoding and audio mixing\nChromium/Chrome: Used by Puppeteer for headless rendering\nNode.js Dependencies\nReact & Vite: Frontend framework and build tool\nPuppeteer: Headless browser for frame capture\nmsedge-tts: Microsoft Edge TTS for audio generation\nExpress: Optional HTTP server (for TTS API endpoint)\nMotion (Framer Motion): Animation library\nTailwind CSS: Styling framework\nShell Commands Used\n\nThis skill executes the following shell commands:\n\nAudio Generation\nnpx tsx scripts/generate-audio.ts <projectId>\n\nReads project JSON configuration\nGenerates MP3 audio files using Edge TTS\nSaves 
word-level timing metadata\nVideo Rendering\nnode scripts/render.js <projectId> [--portrait]\n\nStarts a local Vite dev server\nLaunches Puppeteer to capture frames\nUses FFmpeg to encode video and mix audio\nCleans up temporary files\nFFmpeg Operations\nFrame encoding: ffmpeg -framerate 30 -i frames/frame-%05d.jpg -c:v libx264 ...\nAudio mixing: ffmpeg -i video.mp4 -i audio1.mp3 -i audio2.mp3 -filter_complex ...\nBrowser Detection\nUses which command to find Chrome/Chromium on Linux/macOS\nRespects PUPPETEER_EXECUTABLE_PATH environment variable\nAPI Endpoints (Optional)\n\nThe skill may expose an HTTP endpoint for TTS generation:\n\nPOST /api/tts\nContent-Type: application/json\n\n{\n  \"text\": \"Text to speak\",\n  \"voice\": \"en-US-GuyNeural\",\n  \"rate\": \"+0%\",\n  \"pitch\": \"+0Hz\"\n}\n\n\nThis endpoint is optional and only used when pre-generated audio files are not available.\n\nSecurity Considerations\nFile System Access\nReads from: public/projects/<projectId>/\nWrites to: public/projects/<projectId>/audio/, public/video/\nCreates temporary directories for frame storage\nCleans up temporary files after rendering\nNetwork Access\nStarts local HTTP server on port 5175+ (configurable)\nConnects to Microsoft Edge TTS service (external)\nNo external API keys required for basic functionality\nProcess Execution\nSpawns child processes for: Vite dev server, FFmpeg encoding\nUses execSync for: browser detection, audio generation trigger\nAll commands are predefined and not user-controllable\nEnvironment Variables\n\nOptional configuration:\n\nPUPPETEER_EXECUTABLE_PATH: Custom browser path for Puppeteer\nFASTMCP_LOG_LEVEL: Logging level (default: ERROR)\nProject Structure\npublic/\n  projects/\n    <projectId>/\n      <projectId>.json       # Project configuration\n      footage/               # HTML/CSS media components\n      audio/                 # Generated TTS audio files\n        0.mp3, 1.mp3, ...\n        0.json, 1.json, ...  
# Word timing metadata\n  video/\n    render-<projectId>-landscape.mp4\n    render-<projectId>-portrait.mp4\n\nUsage Example\n# 1. Generate audio for a project\npnpm generate-audio agentSaasPromoVideo\n\n# 2. Preview in browser\npnpm dev\n\n# 3. Render final video\npnpm render agentSaasPromoVideo\n\n# 4. Render portrait version\npnpm render agentSaasPromoVideo --portrait\n\nHTML Animation Guidelines\n\nWhen creating HTML animations for video rendering, use the CSS variable timeline model.\n\nCore Model\nRenderer controls time: Puppeteer sets --t every frame.\nPage only renders state: DOM = f(t).\nNo lifecycle animation APIs (play/start/reset) and no hidden runtime state.\nTransition system handles entrance effects: Don't implement slide/fade transitions in HTML - use the project's transitionIn property instead.\n✅ Required Patterns\nDefine timeline root:\n:root { --t: 0; }\n\nEvery animated property must derive from --t.\nAlways clamp normalized progress values:\n--p: clamp(0, calc((var(--t) - var(--start)) / var(--duration)), 1);\n\nExpress initial/ending states directly in CSS (seek-safe at any frame).\nUse small deterministic JS only for content mapping (e.g., subtitle/text index from t).\nLet transition system handle entrance: Focus on content animation, not entrance effects.\n🚫 Forbidden Patterns\ntransition\nanimation / @keyframes\nwindow.registerFrameAnimation(...)\nrequestAnimationFrame loops for timeline progression\nImplicit time from Date.now() / performance.now() for visual state\nManual entrance transitions: Don't implement slide/fade in HTML - use transitionIn in project config\nFade-out effects: Elements should not disappear after animation completes. 
Use opacity: var(--p) instead of opacity: calc(var(--p) * (1 - var(--fade))) to keep elements visible at their final state.\nRecommended Template\n.element {\n  --start: 0.5;\n  --duration: 1;\n  --p: clamp(0, calc((var(--t) - var(--start)) / var(--duration)), 1);\n\n  opacity: var(--p);\n  transform: translateY(calc((1 - var(--p)) * 20px));\n}\n\nEasing (without transition)\n\nUse math on progress directly:\n\n--p: clamp(0, calc((var(--t) - var(--start)) / var(--duration)), 1);\n--ease-out: calc(1 - (1 - var(--p)) * (1 - var(--p)));\nopacity: var(--ease-out);\n\nJS Hook Pattern (text/content only)\n<script>\n  const labels = ['A', 'B', 'C'];\n  const el = document.getElementById('label');\n\n  window.onTimelineUpdate = (t) => {\n    const idx = Math.floor(Math.max(0, t) / 1.2) % labels.length;\n    el.textContent = labels[idx];\n  };\n</script>\n\nTime Semantics (t vs globalTime)\nonTimelineUpdate(t, globalTime) supports two time domains:\nt: clip-local time (resets to 0 when clip changes). This is the default for most HTML animations.\nglobalTime: continuous timeline across clips. Use only when an element must stay continuous through cross-clip transitions.\nDo not assume t is media-local. If a media appears mid-clip, t may already be large when it first becomes visible.\nFor media-local behavior (e.g., toggle starts animating when this media appears), anchor from first visible globalTime and derive:\nlocal = globalTime - mediaStartGlobalTime\nKeep fallback for compatibility:\nwindow.onTimelineUpdate = (t, globalTime) => {\n  const g = Number.isFinite(globalTime) ? 
globalTime : t;\n  // use `t` for normal clip-local animation, `g` only when continuity is required\n};\n\nDeterminism Checklist\nSeeking to any t yields exactly one deterministic frame.\nAnimation state must not depend on \"previous frame\".\nCross-clip transition visuals should be continuous in both clips.\nFinal frame (t = totalDuration) must remain on the last clip (no wrap to first clip).\nLimitations\nRequires FFmpeg to be installed on the system\nTTS generation requires internet connection (Microsoft Edge TTS)\nRendering is CPU-intensive and may take several minutes\nMaximum TTS text length: ~1000 characters per clip\nFrame capture requires sufficient disk space\nTransparency Statement\n\nThis skill executes shell commands and spawns child processes for video rendering. All operations are limited to:\n\nStarting a local development server (Vite)\nRunning FFmpeg for video encoding\nLaunching Puppeteer for frame capture\nDetecting browser executables on the system\n\nNo arbitrary code execution or user input is passed to shell commands. All file paths and commands are predefined and validated."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/etrobot/code2animation",
    "publisherUrl": "https://clawhub.ai/etrobot/code2animation",
    "owner": "etrobot",
    "version": "1.0.1",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/code2animation",
    "downloadUrl": "https://openagent3.xyz/downloads/code2animation",
    "agentUrl": "https://openagent3.xyz/skills/code2animation/agent",
    "manifestUrl": "https://openagent3.xyz/skills/code2animation/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/code2animation/agent.md"
  }
}