{
  "schemaVersion": "1.0",
  "item": {
    "slug": "video-generation",
    "name": "Video Generation",
    "source": "tencent",
    "type": "skill",
    "category": "内容创作",
    "sourceUrl": "https://clawhub.ai/ivangdavila/video-generation",
    "canonicalUrl": "https://clawhub.ai/ivangdavila/video-generation",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/video-generation",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=video-generation",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "api-patterns.md",
      "benchmarks.md",
      "google-veo.md",
      "kling.md",
      "luma.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=video-generation",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=video-generation",
        "contentDisposition": "attachment; filename=\"video-generation-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/video-generation"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/video-generation",
    "agentPageUrl": "https://openagent3.xyz/skills/video-generation/agent",
    "manifestUrl": "https://openagent3.xyz/skills/video-generation/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/video-generation/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Setup",
        "body": "On first use, read setup.md."
      },
      {
        "title": "When to Use",
        "body": "User needs to generate, edit, or scale AI videos with current models and APIs.\nUse this skill to choose the right current model stack, write stronger motion prompts, and run reliable async video pipelines."
      },
      {
        "title": "Architecture",
        "body": "User preferences persist in ~/video-generation/. See memory-template.md for setup.\n\n~/video-generation/\n├── memory.md      # Preferred providers, model routing, reusable shot recipes\n└── history.md     # Optional run log for jobs, costs, and outputs"
      },
      {
        "title": "Quick Reference",
        "body": "TopicFileInitial setupsetup.mdMemory templatememory-template.mdMigration guidemigration.mdModel snapshotbenchmarks.mdAsync API patternsapi-patterns.mdOpenAI Sora 2openai-sora.mdGoogle Veo 3.xgoogle-veo.mdRunway Gen-4runway.mdLuma Rayluma.mdByteDance Seedanceseedance.mdKlingkling.mdViduvidu.mdPika via Falpika.mdMiniMax Hailuominimax-hailuo.mdReplicate routingreplicate.mdOpen-source local modelsopen-source-video.mdDistribution playbookpromotion.md"
      },
      {
        "title": "1. Resolve model aliases before API calls",
        "body": "Map community names to real API model IDs first.\nExamples: sora-2, sora-2-pro, veo-3.0-generate-001, gen4_turbo, gen4_aleph."
      },
      {
        "title": "2. Route by task, not brand preference",
        "body": "TaskFirst choiceBackupPremium prompt-only generationsora-2-proveo-3.1-generate-001Fast drafts at lower costveo-3.1-fast-generate-001gen4_turboLong-form cinematic shotsgen4_alephray-2Strong image-to-video controlveo-3.0-generate-001gen4_turboMulti-shot narrative consistencySeedance familyhailuo-2.3Local privacy-first workflowsWan2.2 / HunyuanVideoCogVideoX"
      },
      {
        "title": "3. Draft cheap, finish expensive",
        "body": "Start with low duration and lower tier, validate motion and composition, then rerender winners with premium models or longer durations."
      },
      {
        "title": "4. Design prompts as shot instructions",
        "body": "Always include subject, action, camera motion, lens style, lighting, and scene timing.\nFor references and start/end frames, keep continuity constraints explicit."
      },
      {
        "title": "5. Assume async and failure by default",
        "body": "Every provider pipeline must support queued jobs, polling/backoff, retries, cancellation, and signed-URL download before expiry."
      },
      {
        "title": "6. Keep a fallback chain",
        "body": "If the preferred model is blocked or overloaded:\n\nsame provider lower tier, 2) equivalent cross-provider model, 3) open model/local run."
      },
      {
        "title": "Common Traps",
        "body": "Using nickname-only model labels in code -> avoidable API failures\nPushing 8-10 second generations before validating a 3-5 second draft -> wasted credits\nCropping after generation instead of generating native ratio -> lower composition quality\nIgnoring prompt enhancement toggles -> tone drift across providers\nReusing expired output URLs -> broken export workflows\nTreating all providers as synchronous -> stalled jobs and bad timeout handling"
      },
      {
        "title": "External Endpoints",
        "body": "ProviderEndpointData SentPurposeOpenAIapi.openai.comPrompt text, optional input images/video refsSora 2 video generationGoogle Vertex AIaiplatform.googleapis.comPrompt text, optional image input, generation paramsVeo 3.x generationRunwayapi.dev.runwayml.comPrompt text, optional input mediaGen-4 generation and image-to-videoLumaapi.lumalabs.aiPrompt text, optional keyframes/start-end imagesRay generationFalqueue.fal.runPrompt text, optional input mediaPika and Hailuo hosted APIsReplicateapi.replicate.comPrompt text, optional input mediaMulti-model routing and experimentationViduapi.vidu.comPrompt text, optional start/end/reference imagesVidu text/image/reference video APIsTencent MPSmps.tencentcloudapi.comPrompt text and generation parametersUnified AIGC video task APIs\n\nNo other data is sent externally."
      },
      {
        "title": "Security & Privacy",
        "body": "Data that leaves your machine:\n\nPrompt text\nOptional reference images or clips\nRequested rendering parameters (duration, resolution, aspect ratio)\n\nData that stays local:\n\nProvider preferences in ~/video-generation/memory.md\nOptional local job history in ~/video-generation/history.md\n\nThis skill does NOT:\n\nStore API keys in project files\nUpload media outside requested provider calls\nDelete local assets unless the user asks"
      },
      {
        "title": "Trust",
        "body": "This skill can send prompts and media references to third-party AI providers.\nOnly install if you trust those providers with your content."
      },
      {
        "title": "Related Skills",
        "body": "Install with clawhub install <slug> if user confirms:\n\nimage-generation - Build still concepts and keyframes before video generation\nimage-edit - Prepare clean references, masks, and style frames\nvideo-edit - Post-process generated clips and final exports\nvideo-captions - Add subtitle and text overlay workflows\nffmpeg - Compose, transcode, and package production outputs"
      },
      {
        "title": "Feedback",
        "body": "If useful: clawhub star video-generation\nStay updated: clawhub sync"
      }
    ],
    "body": "Setup\n\nOn first use, read setup.md.\n\nWhen to Use\n\nUser needs to generate, edit, or scale AI videos with current models and APIs. Use this skill to choose the right current model stack, write stronger motion prompts, and run reliable async video pipelines.\n\nArchitecture\n\nUser preferences persist in ~/video-generation/. See memory-template.md for setup.\n\n~/video-generation/\n├── memory.md      # Preferred providers, model routing, reusable shot recipes\n└── history.md     # Optional run log for jobs, costs, and outputs\n\nQuick Reference\nTopic\tFile\nInitial setup\tsetup.md\nMemory template\tmemory-template.md\nMigration guide\tmigration.md\nModel snapshot\tbenchmarks.md\nAsync API patterns\tapi-patterns.md\nOpenAI Sora 2\topenai-sora.md\nGoogle Veo 3.x\tgoogle-veo.md\nRunway Gen-4\trunway.md\nLuma Ray\tluma.md\nByteDance Seedance\tseedance.md\nKling\tkling.md\nVidu\tvidu.md\nPika via Fal\tpika.md\nMiniMax Hailuo\tminimax-hailuo.md\nReplicate routing\treplicate.md\nOpen-source local models\topen-source-video.md\nDistribution playbook\tpromotion.md\nCore Rules\n1. Resolve model aliases before API calls\n\nMap community names to real API model IDs first. Examples: sora-2, sora-2-pro, veo-3.0-generate-001, gen4_turbo, gen4_aleph.\n\n2. Route by task, not brand preference\nTask\tFirst choice\tBackup\nPremium prompt-only generation\tsora-2-pro\tveo-3.1-generate-001\nFast drafts at lower cost\tveo-3.1-fast-generate-001\tgen4_turbo\nLong-form cinematic shots\tgen4_aleph\tray-2\nStrong image-to-video control\tveo-3.0-generate-001\tgen4_turbo\nMulti-shot narrative consistency\tSeedance family\thailuo-2.3\nLocal privacy-first workflows\tWan2.2 / HunyuanVideo\tCogVideoX\n3. Draft cheap, finish expensive\n\nStart with low duration and lower tier, validate motion and composition, then rerender winners with premium models or longer durations.\n\n4. Design prompts as shot instructions\n\nAlways include subject, action, camera motion, lens style, lighting, and scene timing. For references and start/end frames, keep continuity constraints explicit.\n\n5. Assume async and failure by default\n\nEvery provider pipeline must support queued jobs, polling/backoff, retries, cancellation, and signed-URL download before expiry.\n\n6. Keep a fallback chain\n\nIf the preferred model is blocked or overloaded:\n\nsame provider lower tier, 2) equivalent cross-provider model, 3) open model/local run.\nCommon Traps\nUsing nickname-only model labels in code -> avoidable API failures\nPushing 8-10 second generations before validating a 3-5 second draft -> wasted credits\nCropping after generation instead of generating native ratio -> lower composition quality\nIgnoring prompt enhancement toggles -> tone drift across providers\nReusing expired output URLs -> broken export workflows\nTreating all providers as synchronous -> stalled jobs and bad timeout handling\nExternal Endpoints\nProvider\tEndpoint\tData Sent\tPurpose\nOpenAI\tapi.openai.com\tPrompt text, optional input images/video refs\tSora 2 video generation\nGoogle Vertex AI\taiplatform.googleapis.com\tPrompt text, optional image input, generation params\tVeo 3.x generation\nRunway\tapi.dev.runwayml.com\tPrompt text, optional input media\tGen-4 generation and image-to-video\nLuma\tapi.lumalabs.ai\tPrompt text, optional keyframes/start-end images\tRay generation\nFal\tqueue.fal.run\tPrompt text, optional input media\tPika and Hailuo hosted APIs\nReplicate\tapi.replicate.com\tPrompt text, optional input media\tMulti-model routing and experimentation\nVidu\tapi.vidu.com\tPrompt text, optional start/end/reference images\tVidu text/image/reference video APIs\nTencent MPS\tmps.tencentcloudapi.com\tPrompt text and generation parameters\tUnified AIGC video task APIs\n\nNo other data is sent externally.\n\nSecurity & Privacy\n\nData that leaves your machine:\n\nPrompt text\nOptional reference images or clips\nRequested rendering parameters (duration, resolution, aspect ratio)\n\nData that stays local:\n\nProvider preferences in ~/video-generation/memory.md\nOptional local job history in ~/video-generation/history.md\n\nThis skill does NOT:\n\nStore API keys in project files\nUpload media outside requested provider calls\nDelete local assets unless the user asks\nTrust\n\nThis skill can send prompts and media references to third-party AI providers. Only install if you trust those providers with your content.\n\nRelated Skills\n\nInstall with clawhub install <slug> if user confirms:\n\nimage-generation - Build still concepts and keyframes before video generation\nimage-edit - Prepare clean references, masks, and style frames\nvideo-edit - Post-process generated clips and final exports\nvideo-captions - Add subtitle and text overlay workflows\nffmpeg - Compose, transcode, and package production outputs\nFeedback\nIf useful: clawhub star video-generation\nStay updated: clawhub sync"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/ivangdavila/video-generation",
    "publisherUrl": "https://clawhub.ai/ivangdavila/video-generation",
    "owner": "ivangdavila",
    "version": "1.0.1",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/video-generation",
    "downloadUrl": "https://openagent3.xyz/downloads/video-generation",
    "agentUrl": "https://openagent3.xyz/skills/video-generation/agent",
    "manifestUrl": "https://openagent3.xyz/skills/video-generation/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/video-generation/agent.md"
  }
}