{
  "schemaVersion": "1.0",
  "item": {
    "slug": "eachlabs-video-generation",
    "name": "Eachlabs Video Generation",
    "source": "tencent",
    "type": "skill",
    "category": "内容创作",
    "sourceUrl": "https://clawhub.ai/eftalyurtseven/eachlabs-video-generation",
    "canonicalUrl": "https://clawhub.ai/eftalyurtseven/eachlabs-video-generation",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/eachlabs-video-generation",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=eachlabs-video-generation",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "references/MODELS.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=eachlabs-video-generation",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=eachlabs-video-generation",
        "contentDisposition": "attachment; filename=\"eachlabs-video-generation-0.1.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/eachlabs-video-generation"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/eachlabs-video-generation",
    "agentPageUrl": "https://openagent3.xyz/skills/eachlabs-video-generation/agent",
    "manifestUrl": "https://openagent3.xyz/skills/eachlabs-video-generation/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/eachlabs-video-generation/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "EachLabs Video Generation",
        "body": "Generate new videos from text prompts, images, or reference inputs using 165+ AI models via the EachLabs Predictions API. For editing existing videos (upscaling, lip sync, extension, subtitles), see the eachlabs-video-edit skill."
      },
      {
        "title": "Authentication",
        "body": "Header: X-API-Key: <your-api-key>\n\nSet the EACHLABS_API_KEY environment variable or pass it directly. Get your key at eachlabs.ai."
      },
      {
        "title": "1. Create a Prediction",
        "body": "curl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"pixverse-v5-6-text-to-video\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"prompt\": \"A golden retriever running through a meadow at sunset, cinematic slow motion\",\n      \"resolution\": \"720p\",\n      \"duration\": \"5\",\n      \"aspect_ratio\": \"16:9\"\n    }\n  }'"
      },
      {
        "title": "2. Poll for Result",
        "body": "curl https://api.eachlabs.ai/v1/prediction/{prediction_id} \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\"\n\nPoll until status is \"success\" or \"failed\". The output video URL is in the response."
      },
      {
        "title": "Text-to-Video",
        "body": "Model\tSlug\tBest For\nPixverse v5.6\tpixverse-v5-6-text-to-video\tGeneral purpose, audio generation\nXAI Grok Imagine\txai-grok-imagine-text-to-video\tFast creative\nKandinsky 5 Pro\tkandinsky5-pro-text-to-video\tArtistic, high quality\nSeedance v1.5 Pro\tseedance-v1-5-pro-text-to-video\tCinematic quality\nWan v2.6\twan-v2-6-text-to-video\tLong/narrative content\nKling v2.6 Pro\tkling-v2-6-pro-text-to-video\tMotion control\nPika v2.2\tpika-v2-2-text-to-video\tStylized, effects\nMinimax Hailuo V2.3 Pro\tminimax-hailuo-v2-3-pro-text-to-video\tHigh fidelity\nSora 2 Pro\tsora-2-text-to-video-pro\tPremium quality\nVeo 3\tveo-3\tGoogle's best quality\nVeo 3.1\tveo3-1-text-to-video\tLatest Google model\nLTX v2 Fast\tltx-v-2-text-to-video-fast\tFastest generation\nMoonvalley Marey\tmoonvalley-marey-text-to-video\tCinematic style\nOvi\tovi-text-to-video\tGeneral purpose"
      },
      {
        "title": "Image-to-Video",
        "body": "Model\tSlug\tBest For\nPixverse v5.6\tpixverse-v5-6-image-to-video\tGeneral purpose\nXAI Grok Imagine\txai-grok-imagine-image-to-video\tCreative edits\nWan v2.6 Flash\twan-v2-6-image-to-video-flash\tFastest\nWan v2.6\twan-v2-6-image-to-video\tHigh quality\nSeedance v1.5 Pro\tseedance-v1-5-pro-image-to-video\tCinematic\nKandinsky 5 Pro\tkandinsky5-pro-image-to-video\tArtistic\nKling v2.6 Pro I2V\tkling-v2-6-pro-image-to-video\tBest Kling quality\nKling O1\tkling-o1-image-to-video\tLatest Kling model\nPika v2.2 I2V\tpika-v2-2-image-to-video\tEffects, PikaScenes\nMinimax Hailuo V2.3 Pro\tminimax-hailuo-v2-3-pro-image-to-video\tHigh fidelity\nSora 2 I2V\tsora-2-image-to-video\tPremium quality\nVeo 3.1 I2V\tveo3-1-image-to-video\tGoogle's latest\nRunway Gen4 Turbo\tgen4-turbo\tFast, film quality\nVeed Fabric 1.0\tveed-fabric-1-0\tSocial media"
      },
      {
        "title": "Transitions & Effects",
        "body": "Model\tSlug\tBest For\nPixverse v5.6 Transition\tpixverse-v5-6-transition\tSmooth transitions\nPika v2.2 PikaScenes\tpika-v2-2-pikascenes\tScene effects\nPixverse v4.5 Effect\tpixverse-v4-5-effect\tVideo effects\nVeo 3.1 First Last Frame\tveo3-1-first-last-frame-to-video\tInterpolation"
      },
      {
        "title": "Motion Control & Animation",
        "body": "Model\tSlug\tBest For\nKling v2.6 Pro Motion\tkling-v2-6-pro-motion-control\tPro motion control\nKling v2.6 Standard Motion\tkling-v2-6-standard-motion-control\tStandard motion\nMotion Fast\tmotion-fast\tFast motion transfer\nMotion Video 14B\tmotion-video-14b\tHigh quality motion\nWan v2.6 R2V\twan-v2-6-reference-to-video\tReference-based\nKling O1 Reference I2V\tkling-o1-reference-image-to-video\tReference-based"
      },
      {
        "title": "Talking Head & Lip Sync",
        "body": "Model\tSlug\tBest For\nBytedance Omnihuman v1.5\tbytedance-omnihuman-v1-5\tFull body animation\nCreatify Aurora\tcreatify-aurora\tAudio-driven avatar\nInfinitalk I2V\tinfinitalk-image-to-video\tImage talking head\nInfinitalk V2V\tinfinitalk-video-to-video\tVideo talking head\nSync Lipsync v2 Pro\tsync-lipsync-v2-pro\tLip sync\nKling Avatar v2 Pro\tkling-avatar-v2-pro\tPro avatar\nKling Avatar v2 Standard\tkling-avatar-v2-standard\tStandard avatar\nEchomimic V3\techomimic-v3\tFace animation\nStable Avatar\tstable-avatar\tStable talking head"
      },
      {
        "title": "Prediction Flow",
        "body": "Check model GET https://api.eachlabs.ai/v1/model?slug=<slug> — validates the model exists and returns the request_schema with exact input parameters. Always do this before creating a prediction to ensure correct inputs.\nPOST https://api.eachlabs.ai/v1/prediction with model slug, version \"0.0.1\", and input parameters matching the schema\nPoll GET https://api.eachlabs.ai/v1/prediction/{id} until status is \"success\" or \"failed\"\nExtract the output video URL from the response"
      },
      {
        "title": "Image-to-Video with Wan v2.6 Flash",
        "body": "curl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"wan-v2-6-image-to-video-flash\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"image_url\": \"https://example.com/photo.jpg\",\n      \"prompt\": \"The person turns to face the camera and smiles\",\n      \"duration\": \"5\",\n      \"resolution\": \"1080p\"\n    }\n  }'"
      },
      {
        "title": "Video Transition with Pixverse",
        "body": "curl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"pixverse-v5-6-transition\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"prompt\": \"Smooth morphing transition between the two images\",\n      \"first_image_url\": \"https://example.com/start.jpg\",\n      \"end_image_url\": \"https://example.com/end.jpg\",\n      \"duration\": \"5\",\n      \"resolution\": \"720p\"\n    }\n  }'"
      },
      {
        "title": "Motion Control with Kling v2.6",
        "body": "curl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"kling-v2-6-pro-motion-control\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"image_url\": \"https://example.com/character.jpg\",\n      \"video_url\": \"https://example.com/dance-reference.mp4\",\n      \"character_orientation\": \"video\"\n    }\n  }'"
      },
      {
        "title": "Talking Head with Omnihuman",
        "body": "curl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"bytedance-omnihuman-v1-5\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"image_url\": \"https://example.com/portrait.jpg\",\n      \"audio_url\": \"https://example.com/speech.mp3\",\n      \"resolution\": \"1080p\"\n    }\n  }'"
      },
      {
        "title": "Prompt Tips",
        "body": "Be specific about motion: \"camera slowly pans left\" rather than \"nice camera movement\"\nInclude style keywords: \"cinematic\", \"anime\", \"3D animation\", \"cyberpunk\"\nDescribe timing: \"slow motion\", \"time-lapse\", \"fast-paced\"\nFor image-to-video, describe what should change from the static image\nUse negative prompts to avoid unwanted elements (where supported)"
      },
      {
        "title": "Parameter Reference",
        "body": "See references/MODELS.md for complete parameter details for each model."
      }
    ],
    "body": "EachLabs Video Generation\n\nGenerate new videos from text prompts, images, or reference inputs using 165+ AI models via the EachLabs Predictions API. For editing existing videos (upscaling, lip sync, extension, subtitles), see the eachlabs-video-edit skill.\n\nAuthentication\nHeader: X-API-Key: <your-api-key>\n\n\nSet the EACHLABS_API_KEY environment variable or pass it directly. Get your key at eachlabs.ai.\n\nQuick Start\n1. Create a Prediction\ncurl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"pixverse-v5-6-text-to-video\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"prompt\": \"A golden retriever running through a meadow at sunset, cinematic slow motion\",\n      \"resolution\": \"720p\",\n      \"duration\": \"5\",\n      \"aspect_ratio\": \"16:9\"\n    }\n  }'\n\n2. Poll for Result\ncurl https://api.eachlabs.ai/v1/prediction/{prediction_id} \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\"\n\n\nPoll until status is \"success\" or \"failed\". The output video URL is in the response.\n\nModel Selection Guide\nText-to-Video\nModel\tSlug\tBest For\nPixverse v5.6\tpixverse-v5-6-text-to-video\tGeneral purpose, audio generation\nXAI Grok Imagine\txai-grok-imagine-text-to-video\tFast creative\nKandinsky 5 Pro\tkandinsky5-pro-text-to-video\tArtistic, high quality\nSeedance v1.5 Pro\tseedance-v1-5-pro-text-to-video\tCinematic quality\nWan v2.6\twan-v2-6-text-to-video\tLong/narrative content\nKling v2.6 Pro\tkling-v2-6-pro-text-to-video\tMotion control\nPika v2.2\tpika-v2-2-text-to-video\tStylized, effects\nMinimax Hailuo V2.3 Pro\tminimax-hailuo-v2-3-pro-text-to-video\tHigh fidelity\nSora 2 Pro\tsora-2-text-to-video-pro\tPremium quality\nVeo 3\tveo-3\tGoogle's best quality\nVeo 3.1\tveo3-1-text-to-video\tLatest Google model\nLTX v2 Fast\tltx-v-2-text-to-video-fast\tFastest generation\nMoonvalley Marey\tmoonvalley-marey-text-to-video\tCinematic style\nOvi\tovi-text-to-video\tGeneral purpose\nImage-to-Video\nModel\tSlug\tBest For\nPixverse v5.6\tpixverse-v5-6-image-to-video\tGeneral purpose\nXAI Grok Imagine\txai-grok-imagine-image-to-video\tCreative edits\nWan v2.6 Flash\twan-v2-6-image-to-video-flash\tFastest\nWan v2.6\twan-v2-6-image-to-video\tHigh quality\nSeedance v1.5 Pro\tseedance-v1-5-pro-image-to-video\tCinematic\nKandinsky 5 Pro\tkandinsky5-pro-image-to-video\tArtistic\nKling v2.6 Pro I2V\tkling-v2-6-pro-image-to-video\tBest Kling quality\nKling O1\tkling-o1-image-to-video\tLatest Kling model\nPika v2.2 I2V\tpika-v2-2-image-to-video\tEffects, PikaScenes\nMinimax Hailuo V2.3 Pro\tminimax-hailuo-v2-3-pro-image-to-video\tHigh fidelity\nSora 2 I2V\tsora-2-image-to-video\tPremium quality\nVeo 3.1 I2V\tveo3-1-image-to-video\tGoogle's latest\nRunway Gen4 Turbo\tgen4-turbo\tFast, film quality\nVeed Fabric 1.0\tveed-fabric-1-0\tSocial media\nTransitions & Effects\nModel\tSlug\tBest For\nPixverse v5.6 Transition\tpixverse-v5-6-transition\tSmooth transitions\nPika v2.2 PikaScenes\tpika-v2-2-pikascenes\tScene effects\nPixverse v4.5 Effect\tpixverse-v4-5-effect\tVideo effects\nVeo 3.1 First Last Frame\tveo3-1-first-last-frame-to-video\tInterpolation\nMotion Control & Animation\nModel\tSlug\tBest For\nKling v2.6 Pro Motion\tkling-v2-6-pro-motion-control\tPro motion control\nKling v2.6 Standard Motion\tkling-v2-6-standard-motion-control\tStandard motion\nMotion Fast\tmotion-fast\tFast motion transfer\nMotion Video 14B\tmotion-video-14b\tHigh quality motion\nWan v2.6 R2V\twan-v2-6-reference-to-video\tReference-based\nKling O1 Reference I2V\tkling-o1-reference-image-to-video\tReference-based\nTalking Head & Lip Sync\nModel\tSlug\tBest For\nBytedance Omnihuman v1.5\tbytedance-omnihuman-v1-5\tFull body animation\nCreatify Aurora\tcreatify-aurora\tAudio-driven avatar\nInfinitalk I2V\tinfinitalk-image-to-video\tImage talking head\nInfinitalk V2V\tinfinitalk-video-to-video\tVideo talking head\nSync Lipsync v2 Pro\tsync-lipsync-v2-pro\tLip sync\nKling Avatar v2 Pro\tkling-avatar-v2-pro\tPro avatar\nKling Avatar v2 Standard\tkling-avatar-v2-standard\tStandard avatar\nEchomimic V3\techomimic-v3\tFace animation\nStable Avatar\tstable-avatar\tStable talking head\nPrediction Flow\nCheck model GET https://api.eachlabs.ai/v1/model?slug=<slug> — validates the model exists and returns the request_schema with exact input parameters. Always do this before creating a prediction to ensure correct inputs.\nPOST https://api.eachlabs.ai/v1/prediction with model slug, version \"0.0.1\", and input parameters matching the schema\nPoll GET https://api.eachlabs.ai/v1/prediction/{id} until status is \"success\" or \"failed\"\nExtract the output video URL from the response\nExamples\nImage-to-Video with Wan v2.6 Flash\ncurl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"wan-v2-6-image-to-video-flash\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"image_url\": \"https://example.com/photo.jpg\",\n      \"prompt\": \"The person turns to face the camera and smiles\",\n      \"duration\": \"5\",\n      \"resolution\": \"1080p\"\n    }\n  }'\n\nVideo Transition with Pixverse\ncurl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"pixverse-v5-6-transition\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"prompt\": \"Smooth morphing transition between the two images\",\n      \"first_image_url\": \"https://example.com/start.jpg\",\n      \"end_image_url\": \"https://example.com/end.jpg\",\n      \"duration\": \"5\",\n      \"resolution\": \"720p\"\n    }\n  }'\n\nMotion Control with Kling v2.6\ncurl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"kling-v2-6-pro-motion-control\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"image_url\": \"https://example.com/character.jpg\",\n      \"video_url\": \"https://example.com/dance-reference.mp4\",\n      \"character_orientation\": \"video\"\n    }\n  }'\n\nTalking Head with Omnihuman\ncurl -X POST https://api.eachlabs.ai/v1/prediction \\\n  -H \"Content-Type: application/json\" \\\n  -H \"X-API-Key: $EACHLABS_API_KEY\" \\\n  -d '{\n    \"model\": \"bytedance-omnihuman-v1-5\",\n    \"version\": \"0.0.1\",\n    \"input\": {\n      \"image_url\": \"https://example.com/portrait.jpg\",\n      \"audio_url\": \"https://example.com/speech.mp3\",\n      \"resolution\": \"1080p\"\n    }\n  }'\n\nPrompt Tips\nBe specific about motion: \"camera slowly pans left\" rather than \"nice camera movement\"\nInclude style keywords: \"cinematic\", \"anime\", \"3D animation\", \"cyberpunk\"\nDescribe timing: \"slow motion\", \"time-lapse\", \"fast-paced\"\nFor image-to-video, describe what should change from the static image\nUse negative prompts to avoid unwanted elements (where supported)\nParameter Reference\n\nSee references/MODELS.md for complete parameter details for each model."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/eftalyurtseven/eachlabs-video-generation",
    "publisherUrl": "https://clawhub.ai/eftalyurtseven/eachlabs-video-generation",
    "owner": "eftalyurtseven",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/eachlabs-video-generation",
    "downloadUrl": "https://openagent3.xyz/downloads/eachlabs-video-generation",
    "agentUrl": "https://openagent3.xyz/skills/eachlabs-video-generation/agent",
    "manifestUrl": "https://openagent3.xyz/skills/eachlabs-video-generation/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/eachlabs-video-generation/agent.md"
  }
}