{
  "schemaVersion": "1.0",
  "item": {
    "slug": "ima-video-ai",
    "name": "IMA Studio Video Generation",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/allenfancy-gan/ima-video-ai",
    "canonicalUrl": "https://clawhub.ai/allenfancy-gan/ima-video-ai",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/ima-video-ai",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ima-video-ai",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "_meta.json",
      "requirements.txt",
      "clawhub.json",
      "SKILL.md",
      "scripts/ima_logger.py",
      "scripts/ima_video_create.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/ima-video-ai"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/ima-video-ai",
    "agentPageUrl": "https://openagent3.xyz/skills/ima-video-ai/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ima-video-ai/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ima-video-ai/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "📋 Before you install",
        "body": "Credentials: This skill requires an IMA API key at runtime (IMA_API_KEY or --api-key). The key is sent to api.imastudio.com (main API) and imapi.liveme.com (image uploads). Obtain keys at https://imastudio.com. Use a scoped or test key if you want to limit exposure.\nLocal files: The skill reads local image files you provide (for image-to-video); it also writes logs under ~/.openclaw/logs/ima_skills/ and preferences to ~/.openclaw/memory/ima_prefs.json. Do not point it at sensitive paths.\nCross-skill reads: If ima-knowledge-ai is installed, this skill instructs the agent to read that skill's reference files (~/.openclaw/skills/ima-knowledge-ai/references/*) for workflow and visual-consistency guidance. If you do not have or trust that skill, skip those steps and use this skill's built-in defaults and tables."
      },
      {
        "title": "⚠️ 重要：模型 ID 参考",
        "body": "CRITICAL: When calling the script, you MUST use the exact model_id (second/third column), NOT the friendly model name. Do NOT infer model_id from the friendly name.\n\nQuick Reference Table:\n\n友好名称 (Friendly Name)model_id (t2v)model_id (i2v)说明 (Notes)Wan 2.6wan2.6-t2vwan2.6-i2v⚠️ Note -t2v/-i2v suffixKling O1kling-video-o1kling-video-o1⚠️ Note video- prefixKling 2.6kling-v2-6kling-v2-6⚠️ Note v prefixHailuo 2.3MiniMax-Hailuo-2.3MiniMax-Hailuo-2.3⚠️ Note MiniMax- prefixHailuo 2.0MiniMax-Hailuo-02MiniMax-Hailuo-02⚠️ Note 02 not 2.0Vidu Q2viduq2viduq2-pro⚠️ Different for t2v/i2vGoogle Veo 3.1veo-3.1-generate-previewveo-3.1-generate-preview⚠️ Note -generate-preview suffixSora 2 Prosora-2-prosora-2-pro✅ StraightforwardPixversepixversepixverse✅ Same as friendly nameSeeDance 1.5 Prodoubao-seedance-1.5-prodoubao-seedance-1.5-pro⚠️ Note doubao- prefix\n\nUser Input Variations Handled by Agent:\n\n\"万\" / \"万2.6\" / \"Wan\" → Wan 2.6 → wan2.6-t2v / wan2.6-i2v\n\"可灵\" / \"可灵O1\" / \"Kling O1\" → kling-video-o1\n\"可灵2.6\" / \"Kling 2.6\" → kling-v2-6\n\"海螺\" / \"海螺2.3\" / \"Hailuo\" → MiniMax-Hailuo-2.3\n\"Veo\" / \"Google Veo\" → veo-3.1-generate-preview\n\nHow to get the correct model_id:\n\nCheck this table first\nUse --list-models --task-type text_to_video (or image_to_video)\nRefer to command examples below\n\nExample:\n\n# ❌ WRONG: Inferring from friendly name\n--model-id kling-o1\n\n# ✅ CORRECT: Using exact model_id from table\n--model-id kling-video-o1"
      },
      {
        "title": "⚠️ MANDATORY PRE-CHECK: Read Knowledge Base First!",
        "body": "If ima-knowledge-ai is not installed: Skip all \"Read …\" steps below; use only this SKILL's default models and the 📥 User Input Parsing tables for task_type, model_id, and parameters.\n\nBEFORE executing ANY video generation task, you MUST:\n\nCRITICAL: Understand video modes — Read ima-knowledge-ai/references/video-modes.md:\n\nimage_to_video = first frame to video (输入图成为第1帧)\nreference_image_to_video = reference appearance to video (输入图是视觉参考，不是第1帧)\nThese are COMPLETELY DIFFERENT concepts!\nWrong mode choice = wrong result\n\n\n\nCheck for visual consistency needs — Read ima-knowledge-ai/references/visual-consistency.md if:\n\nUser mentions: \"系列\"、\"分镜\"、\"同一个\"、\"角色\"、\"续\"、\"多个镜头\"\nTask involves: multi-shot videos, character continuity, scene consistency\nSecond+ request about same subject (e.g., \"旺财在游泳\" after \"生成旺财照片\")\n\n\n\nCheck workflow/model/parameters — Read relevant ima-knowledge-ai/references/ sections if:\n\nComplex multi-step video production\nUnsure which model to use\nNeed parameter guidance (duration, resolution, reference strength)\n\nWhy this matters:\n\nAI video generation defaults to 独立生成 (independent generation) each time\nWithout reference images, \"same character/scene\" will look completely different\nText-to-video CANNOT maintain visual consistency — must use image-based modes\n\nExample failure case:\n\nUser: \"生成一只小狗，叫旺财\" \n  → You: generate dog image A\n\nUser: \"生成旺财在游泳的视频\"\n  → ❌ Wrong: text_to_video \"狗在游泳\" (new dog, different from A)\n  → ✅ Right: read visual-consistency.md + video-modes.md → \n             use image_to_video with image A as first frame\n\nHow to check:\n\n# Step 1: Read knowledge base\nread(\"~/.openclaw/skills/ima-knowledge-ai/references/video-modes.md\")\nread(\"~/.openclaw/skills/ima-knowledge-ai/references/visual-consistency.md\")\n\n# Step 2: Identify if reference image needed\nif \"same subject\" or \"series\" or \"character continuity\":\n    # Use image-based mode with previous result as reference\n    reference_image = previous_generation_result\n    \n    # Choose mode based on requirement\n    if \"reference becomes first frame\":\n        use_image_to_video(prompt, reference_image)\n    else:\n        use_reference_image_to_video(prompt, reference_image, reference_strength=0.8)\nelse:\n    # OK to use text-to-video\n    use_text_to_video(prompt)\n\nNo exceptions — if you skip this check and generate visually inconsistent results, that's a bug."
      },
      {
        "title": "📥 User Input Parsing (Model & Parameter Recognition)",
        "body": "Purpose: So that any agent (Claude or other models) parses user intent consistently, follow these rules when deriving task_type, model_id, and parameters from natural language. Do not guess — normalize first, then map."
      },
      {
        "title": "1. User phrasing → task_type",
        "body": "User intent / phrasingtask_typeNotesOnly text, no imagetext_to_video\"生成一段…视频\" / \"text to video\"One image as first frame (图成为第1帧)image_to_video\"把这张图动起来\" / \"用这张图做视频\" / \"图生视频\"One image as reference (视觉参考，非第1帧)reference_image_to_video\"参考这张图生成\" / \"像这张风格/角色\"Two images (start + end)first_last_frame_to_video\"首帧+尾帧\" / \"从A过渡到B\"\n\nWhen in doubt: \"把图动起来\" / \"图动\" → image_to_video; \"参考这张图\" / \"按这张风格\" → reference_image_to_video."
      },
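      {
        "title": "Sketch: task_type inference (illustrative)",
        "body": "A minimal sketch of the table above, assuming the agent counts attached images before reading phrasing. The function name and keyword lists are hypothetical, not part of the bundled script.\n\ndef infer_task_type(text: str, image_count: int) -> str:\n    # Two images always mean first/last frame per the table above\n    if image_count == 2:\n        return \"first_last_frame_to_video\"\n    if image_count >= 1:\n        # \"参考这张图\" / \"按这张风格\" → visual reference, not first frame\n        if any(k in text for k in (\"参考\", \"风格\", \"像这张\")):\n            return \"reference_image_to_video\"\n        return \"image_to_video\"\n    return \"text_to_video\"\n\nWhen the phrasing is ambiguous, fall back to the \"When in doubt\" rule in the previous section rather than guessing."
      },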
      {
        "title": "2. Model name / alias → model_id (normalize then lookup)",
        "body": "Normalize user wording (case-insensitive, ignore spaces), then map to model_id:\n\nUser says (examples)For t2v → model_idFor i2v → model_id万 / Wan / 万2.6 / wan2.6wan2.6-t2vwan2.6-i2v可灵 / Kling / Kling O1 / 可灵O1kling-video-o1kling-video-o1Kling 2.6 / 可灵2.6kling-v2-6kling-v2-6海螺 / Hailuo / 海螺2.3MiniMax-Hailuo-2.3MiniMax-Hailuo-2.3Hailuo 2.0 / 海螺2.0MiniMax-Hailuo-02MiniMax-Hailuo-02Vidu / Vidu Q2viduq2(i2v: viduq2-pro for \"Vidu Q2 Pro\")Veo / Google Veo / Veo 3.1veo-3.1-generate-previewveo-3.1-generate-previewSora / Sora 2 Prosora-2-prosora-2-proPixverse / Pixverse V5.5pixversepixverse最便宜 / 最省钱 / cheapest / budgetviduq2 (5 pts)wan2.6-i2v or per product list最好 / 最高质量 / best / premiumPrefer Kling O1 / Veo 3.1Same\n\nIf the user names a model not in the table, match by Name in the \"Supported Models\" tables below and use its model_id for the chosen task_type."
      },
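      {
        "title": "Sketch: alias normalization and lookup (illustrative)",
        "body": "A minimal sketch of \"normalize then lookup\", covering a subset of the aliases above. The table and function are hypothetical helpers, not part of the bundled script.\n\nALIASES = {\n    # normalized alias: (t2v model_id, i2v model_id)\n    \"万\": (\"wan2.6-t2v\", \"wan2.6-i2v\"),\n    \"wan\": (\"wan2.6-t2v\", \"wan2.6-i2v\"),\n    \"可灵o1\": (\"kling-video-o1\", \"kling-video-o1\"),\n    \"klingo1\": (\"kling-video-o1\", \"kling-video-o1\"),\n    \"可灵2.6\": (\"kling-v2-6\", \"kling-v2-6\"),\n    \"海螺\": (\"MiniMax-Hailuo-2.3\", \"MiniMax-Hailuo-2.3\"),\n    \"veo\": (\"veo-3.1-generate-preview\", \"veo-3.1-generate-preview\"),\n}\n\ndef resolve_model_id(user_text: str, task_type: str):\n    # Normalize: lowercase, drop spaces; then substring-match against aliases\n    key = user_text.lower().replace(\" \", \"\")\n    for alias, (t2v, i2v) in ALIASES.items():\n        if alias in key:\n            return i2v if \"image\" in task_type else t2v\n    return None  # unknown → match the Supported Models tables or use --list-models"
      },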
      {
        "title": "3. User phrasing → duration / resolution / aspect_ratio",
        "body": "User says (examples)ParameterNormalized valueFallback if unsupported5秒 / 5s / 5 secondduration5—10秒 / 10sduration10—15秒 / 15sduration15—1分钟 / 1 minduration—Use 15 if model max is 15s; tell user \"当前最长15秒\"横屏 / 16:9 / 横向aspect_ratio16:9—竖屏 / 9:16 / 竖向aspect_ratio9:16—1:1 / 方形aspect_ratio1:1—720P / 720presolution720P—1080P / 1080p / 高清resolution1080P—4K / 4kresolution4KOnly if model supports (e.g. Veo 3.1)\n\nIf the user does not specify duration/resolution/aspect_ratio, use form_config defaults from the product list for the chosen model (e.g. 5s, 720P or 1080P, 16:9)."
      },
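      {
        "title": "Sketch: parameter extraction (illustrative)",
        "body": "A minimal sketch of the table above. The token list is a hypothetical subset; anything not matched should fall back to form_config defaults as described in the previous section.\n\ndef parse_parameters(text: str) -> dict:\n    params = {}\n    # Longer tokens first so \"15秒\" is not shadowed by \"5秒\"\n    table = [\n        (\"15秒\", \"duration\", 15), (\"10秒\", \"duration\", 10), (\"5秒\", \"duration\", 5),\n        (\"横屏\", \"aspect_ratio\", \"16:9\"), (\"竖屏\", \"aspect_ratio\", \"9:16\"),\n        (\"1080\", \"resolution\", \"1080P\"), (\"720\", \"resolution\", \"720P\"), (\"4k\", \"resolution\", \"4K\"),\n    ]\n    lowered = text.lower()\n    for token, field, value in table:\n        if token in lowered and field not in params:\n            params[field] = value\n    return params  # merge over form_config defaults at call time"
      },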
      {
        "title": "⚙️ How This Skill Works",
        "body": "For transparency: This skill uses a bundled Python script (scripts/ima_video_create.py) to call the IMA Open API. The script:\n\nSends your prompt to IMA's servers (two domains, see below)\nUses --user-id only locally as a key for storing your model preferences\nReturns a video URL when generation is complete"
      },
      {
        "title": "🌐 Network Endpoints Used",
        "body": "This skill connects to two domains owned by IMA Studio for complete functionality:\n\nDomainPurposeWhat's SentAuthenticationapi.imastudio.comMain API (task creation, status polling)Prompts, model params, task IDsBearer token (IMA API key)imapi.liveme.comImage upload service (OSS token generation)Image files (for i2v/ref tasks), IMA API keyIMA API key + APP_KEY signature\n\nWhy two domains?\n\napi.imastudio.com: IMA's video generation API (handles task orchestration)\nimapi.liveme.com: IMA's media storage infrastructure (handles large file uploads)\nBoth services are owned and operated by IMA Studio\n\nPrivacy implications:\n\nYour IMA API key is sent to both domains for authentication\nImage files are uploaded to imapi.liveme.com to obtain CDN URLs (for image_to_video, first_last_frame_to_video, reference_image_to_video tasks)\nVideo generation happens on api.imastudio.com using the CDN URLs\nFor text_to_video tasks (no image input), only api.imastudio.com is contacted\n\nSecurity verification:\n\n# List all network endpoints in the code:\ngrep -n \"https://\" scripts/ima_video_create.py\n\n# Expected output:\n# 57: DEFAULT_BASE_URL = \"https://api.imastudio.com\"\n# 58: DEFAULT_IM_BASE_URL = \"https://imapi.liveme.com\"\n\nIf you're concerned about the two-domain architecture:\n\nReview IMA Studio's privacy policy at https://imastudio.com/privacy\nContact IMA technical support to confirm domain ownership: support@imastudio.com\nUse a test/scoped API key first (see security notice below)"
      },
      {
        "title": "⚠️ Credential Security Notice",
        "body": "Your IMA API key is sent to TWO domains:\n\napi.imastudio.com — Main video generation API\nimapi.liveme.com — Image upload service (only when using image-to-video tasks)\n\nBoth domains are owned by IMA Studio, but if you're concerned about credential exposure:\n\n✅ Best practices:\n\nUse a test/scoped API key for initial testing (create at https://imastudio.com/api-keys)\nSet a low quota (e.g., 100 credits) for the test key\nRotate your key after testing if needed\nContact IMA support to confirm domain ownership: support@imastudio.com\n\n❌ Do NOT:\n\nUse a production key if you're uncomfortable with the two-domain architecture\nShare your API key with others\nCommit your API key to version control\n\nWhat gets sent to IMA servers:\n\n✅ Your video prompt/description\n✅ Model selection (Wan/Hailuo/Kling/etc.)\n✅ Video parameters (duration, resolution, etc.)\n✅ Image files (for image-to-video tasks, uploaded to imapi.liveme.com)\n✅ IMA API key (for authentication to both domains)\n❌ NO user_id (it's only used locally)\n\nWhat's stored locally:\n\n~/.openclaw/memory/ima_prefs.json - Your model preferences (< 1 KB)\n~/.openclaw/logs/ima_skills/ - Generation logs (auto-deleted after 7 days)"
      },
      {
        "title": "Agent Execution (Internal Reference)",
        "body": "Note for users: You can review the script source at scripts/ima_video_create.py anytime.\nThe agent uses this script to simplify API calls. Network requests go to two IMA Studio domains: api.imastudio.com (API) and imapi.liveme.com (image uploads).\n\nUse the bundled script internally to ensure correct parameter construction:\n\n# Text to video\npython3 {baseDir}/scripts/ima_video_create.py \\\n  --api-key  $IMA_API_KEY \\\n  --task-type text_to_video \\\n  --model-id  wan2.6-t2v \\\n  --prompt   \"a puppy runs across a sunny meadow, cinematic\" \\\n  --user-id  {user_id} \\\n  --output-json\n\n# Image to video\npython3 {baseDir}/scripts/ima_video_create.py \\\n  --api-key      $IMA_API_KEY \\\n  --task-type    image_to_video \\\n  --model-id     wan2.6-i2v \\\n  --prompt       \"camera slowly zooms in\" \\\n  --input-images https://example.com/photo.jpg \\\n  --user-id      {user_id} \\\n  --output-json\n\n✅ Local images: --input-images accepts both HTTPS URLs and local file paths. Local files are automatically uploaded to IMA CDN by the script (no need to host them first).\n\n# First-last frame to video\npython3 {baseDir}/scripts/ima_video_create.py \\\n  --api-key      $IMA_API_KEY \\\n  --task-type    first_last_frame_to_video \\\n  --model-id     kling-video-o1 \\\n  --prompt       \"smooth transition\" \\\n  --input-images https://example.com/first.jpg https://example.com/last.jpg \\\n  --user-id      {user_id} \\\n  --output-json\n\nThe script outputs JSON — parse it to get the result URL and pass it to the user via the UX protocol messages below.\n\n🚨 CRITICAL: How to send the video to user (Feishu/Discord/IM)\n\n# ✅ CORRECT: Use the remote URL directly\nvideo_url = json_output[\"url\"]\nmessage(\n    action=\"send\",\n    media=video_url,  # Direct HTTPS URL → renders inline video player\n    caption=\"✅ 视频生成成功！\\n• 模型：[Model Name]\\n• 耗时：[X]s\\n• 消耗积分：[N pts]\"\n)\n\n# ❌ WRONG: Download to local file first\n# curl -o /tmp/video.mp4 {video_url}\n# message(media=\"/tmp/video.mp4\")  # Shows as file attachment (📎 path), NOT playable\n\nWhy this matters:\n\n✅ Remote URL → Feishu renders inline video player with ▶ button\n❌ Local file path → Feishu shows file attachment (📎 /tmp/...), not playable\n\nAlways use the remote URL directly. Never download the video to local storage."
      },
      {
        "title": "Sora 2 Pro — Content Safety Policy",
        "body": "⚠️ Important: Sora 2 Pro has strict content safety policies (OpenAI policy).\n\nContent Restrictions:\n\n❌ Cannot generate: people, celebrities, IP assets (e.g., Mickey Mouse)\n❌ Strict prompt moderation\n✅ Safe themes: landscapes, abstract patterns, animals, nature scenes\n\nRecommended Prompts:\n\n✅ \"A sunset over mountains\"\n✅ \"Abstract colorful flowing patterns\"\n✅ \"A bird flying through clouds\"\n\nAvoid:\n\n❌ \"A person walking\" (people)\n❌ \"Mickey Mouse dancing\" (IP asset)\n❌ Celebrity names or recognizable figures\n\nIf your prompt is rejected, try using more abstract or nature-focused descriptions.\n\nCall IMA Open API to create AI-generated videos. All endpoints require an ima_* API key. The core flow is: query products → create task → poll until done."
      },
      {
        "title": "🔒 Security & Transparency Policy",
        "body": "This skill is community-maintained and open for inspection."
      },
      {
        "title": "✅ What Users CAN Do",
        "body": "Full transparency:\n\n✅ Review all source code: Check scripts/ima_video_create.py and ima_logger.py anytime\n✅ Verify network calls: Network requests go to two IMA Studio domains: api.imastudio.com (API) and imapi.liveme.com (image uploads). See \"🌐 Network Endpoints Used\" section above for full details.\n✅ Inspect local data: View ~/.openclaw/memory/ima_prefs.json and log files\n✅ Control privacy: Delete preferences/logs anytime, or disable file writes (see below)\n\nConfiguration allowed:\n\n✅ Set API key in environment or agent config:\n\nEnvironment variable: export IMA_API_KEY=ima_your_key_here\nOpenClaw/MCP config: Add IMA_API_KEY to agent's environment configuration\nGet your key at: https://imastudio.com\n\n\n✅ Use scoped/test keys: Test with limited API keys, rotate after testing\n✅ Disable file writes: Make prefs/logs read-only or symlink to /dev/null\n\nData control:\n\n✅ View stored data: cat ~/.openclaw/memory/ima_prefs.json\n✅ Delete preferences: rm ~/.openclaw/memory/ima_prefs.json (resets to defaults)\n✅ Delete logs: rm -rf ~/.openclaw/logs/ima_skills/ (auto-cleanup after 7 days anyway)"
      },
      {
        "title": "⚠️ Advanced Users: Fork & Modify",
        "body": "If you need to modify this skill for your use case:\n\nFork the repository (don't modify the original)\nUpdate your fork with your changes\nTest thoroughly with limited API keys\nDocument your changes for troubleshooting\n\nNote: Modified skills may break API compatibility or introduce security issues. Official support only covers the unmodified version."
      },
      {
        "title": "❌ What to AVOID (Security Risks)",
        "body": "Actions that could compromise security:\n\n❌ Sharing API keys publicly or in skill files\n❌ Modifying API endpoints to unknown servers\n❌ Disabling SSL/TLS certificate verification\n❌ Logging sensitive user data (prompts, IDs, etc.)\n❌ Bypassing authentication or billing mechanisms\n\nWhy this matters:\n\nAPI Compatibility: Skill logic aligns with IMA Open API schema\nSecurity: Malicious modifications could leak credentials or bypass billing\nSupport: Modified skills may not be supported\nCommunity: Breaking changes affect all users"
      },
      {
        "title": "📋 Privacy & Data Handling Summary",
        "body": "What this skill does with your data:\n\nData TypeSent to IMA?Stored Locally?User ControlVideo prompts✅ Yes (required for generation)❌ NoNone (required)API key✅ Yes (authentication header)❌ NoSet via env varuser_id (optional CLI arg)❌ Never (local preference key only)✅ Yes (as prefs file key)Change --user-id valueModel preferences❌ No✅ Yes (~/.openclaw)Delete anytimeGeneration logs❌ No✅ Yes (~/.openclaw)Auto-cleanup 7 days\n\nPrivacy recommendations:\n\nUse test/scoped API keys for initial testing\nNote: --user-id is never sent to IMA servers - it's only used locally as a key for storing preferences in ~/.openclaw/memory/ima_prefs.json\nReview source code at scripts/ima_video_create.py to verify network calls (search for create_task function)\nRotate API keys after testing or if compromised\n\nGet your IMA API key: Visit https://imastudio.com to register and get started."
      },
      {
        "title": "🔧 For Skill Maintainers Only",
        "body": "Version control:\n\nAll changes must go through Git with proper version bumps (semver)\nCHANGELOG.md must document all changes\nProduction deployments require code review\n\nFile checksums (optional):\n\n# Verify skill integrity\nsha256sum SKILL.md scripts/ima_video_create.py\n\nIf users report issues, verify file integrity first."
      },
      {
        "title": "🧠 User Preference Memory",
        "body": "User preferences have highest priority when they exist. But preferences are only saved when users explicitly express model preferences — not from automatic model selection."
      },
      {
        "title": "Storage: ~/.openclaw/memory/ima_prefs.json",
        "body": "{\n  \"user_{user_id}\": {\n    \"text_to_video\":              { \"model_id\": \"wan2.6-t2v\",        \"model_name\": \"Wan 2.6\",          \"credit\": 25, \"last_used\": \"...\" },\n    \"image_to_video\":             { \"model_id\": \"wan2.6-i2v\",        \"model_name\": \"Wan 2.6\",          \"credit\": 25, \"last_used\": \"...\" },\n    \"first_last_frame_to_video\":  { \"model_id\": \"kling-video-o1\",    \"model_name\": \"Kling O1\",        \"credit\": 48, \"last_used\": \"...\" },\n    \"reference_image_to_video\":   { \"model_id\": \"kling-video-o1\",    \"model_name\": \"Kling O1\",        \"credit\": 48, \"last_used\": \"...\" }\n  }\n}"
      },
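      {
        "title": "Sketch: reading and writing preferences (illustrative)",
        "body": "A minimal sketch of how an agent might read and write the storage file above, using only the Python standard library. The helper names are hypothetical, not part of the bundled script.\n\nimport json\nimport os\n\nPREFS_PATH = os.path.expanduser(\"~/.openclaw/memory/ima_prefs.json\")\n\ndef load_prefs() -> dict:\n    try:\n        with open(PREFS_PATH) as f:\n            return json.load(f)\n    except (FileNotFoundError, json.JSONDecodeError):\n        return {}\n\ndef save_pref(user_id: str, task_type: str, model: dict) -> None:\n    # model: {\"model_id\": ..., \"model_name\": ..., \"credit\": ..., \"last_used\": ...}\n    prefs = load_prefs()\n    prefs.setdefault(f\"user_{user_id}\", {})[task_type] = model\n    os.makedirs(os.path.dirname(PREFS_PATH), exist_ok=True)\n    with open(PREFS_PATH, \"w\") as f:\n        json.dump(prefs, f, ensure_ascii=False, indent=2)"
      },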
      {
        "title": "Model Selection Flow (Every Generation)",
        "body": "Step 1: Get knowledge-ai recommendation (if installed)\n\nknowledge_recommended_model = read_ima_knowledge_ai()  # e.g., \"Wan 2.6\"\n\nStep 2: Check user preference\n\nuser_pref = load_prefs().get(f\"user_{user_id}\", {}).get(task_type)  # e.g., {\"model_id\": \"kling-video-o1\", ...}\n\nStep 3: Decide which model to use\n\nif user_pref exists:\n    use_model = user_pref[\"model_id\"]  # Highest priority\nelse:\n    use_model = knowledge_recommended_model or fallback_default\n\nStep 4: Check for mismatch (for later hint)\n\nif user_pref exists and knowledge_recommended_model != user_pref[\"model_id\"]:\n    mismatch = True  # Will add hint in success message"
      },
      {
        "title": "When to Write (User Explicit Preference ONLY)",
        "body": "✅ Save preference when user explicitly specifies a model:\n\nUser saysAction用XXX / 换成XXX / 改用XXXSwitch to model XXX + save as preference以后都用XXX / 默认用XXX / always use XXXSave + confirm: ✅ 已记住！以后视频生成默认用 [XXX]我喜欢XXX / 我更喜欢XXXSave as preference\n\n❌ Do NOT save when:\n\nAgent auto-selects from knowledge-ai → not user preference\nAgent uses fallback default → not user preference\nUser says generic quality requests (see \"Clear Preference\" below) → clear preference instead"
      },
      {
        "title": "When to Clear (User Abandons Preference)",
        "body": "🗑️ Clear preference when user wants automatic selection:\n\nUser saysAction用最好的 / 用最合适的 / best / recommendedClear pref + use knowledge-ai recommendation推荐一个 / 你选一个 / 自动选择Clear pref + use knowledge-ai recommendation用默认的 / 用新的Clear pref + use knowledge-ai recommendation试试别的 / 换个试试 (without specific model)Clear pref + use knowledge-ai recommendation重新推荐Clear pref + use knowledge-ai recommendation\n\nImplementation:\n\ndel prefs[f\"user_{user_id}\"][task_type]\nsave_prefs(prefs)"
      },
      {
        "title": "⭐ Model Selection Priority",
        "body": "Selection flow:\n\nUser preference (if exists) → Highest priority, always respect\nima-knowledge-ai skill (if installed) → Professional recommendation based on task\nFallback defaults → Use table below (only if neither 1 nor 2 exists)\n\nImportant notes:\n\nUser preference is only saved when user explicitly specifies a model (see \"When to Write\" above)\nKnowledge-ai is always consulted (even when user pref exists) to detect mismatches\nWhen mismatch detected → add gentle hint in success message (does NOT interrupt generation)\n\nThe defaults below are FALLBACK only. User preferences have highest priority, then knowledge-ai recommendations.\nAlways default to the newest and most popular model. Do NOT default to the cheapest.\n\nTaskDefault Modelmodel_idversion_idCostWhytext_to_videoWan 2.6wan2.6-t2vwan2.6-t2v25 pts🔥 Most popular, balanced costtext_to_video (premium)Hailuo 2.3MiniMax-Hailuo-2.3MiniMax-Hailuo-2.338 ptsHigher qualitytext_to_video (budget)Vidu Q2viduq2viduq25 ptsLowest cost t2vimage_to_videoWan 2.6wan2.6-i2vwan2.6-i2v25 pts🔥 Most popular i2v, 1080Pimage_to_video (premium)Kling 2.6kling-v2-6kling-v2-640-160 ptsPremium Kling i2vfirst_last_frame_to_videoKling O1kling-video-o1kling-video-o148 ptsNewest Kling reasoning modelreference_image_to_videoKling O1kling-video-o1kling-video-o148 ptsBest reference fidelity\n\nSelection guide (production credits, sorted by popularity):\n\n🔥 Most popular text-to-video → Wan 2.6 (25 pts, balanced cost & quality)\nPremium text-to-video → Hailuo 2.3 (38 pts, higher quality)\nBudget text-to-video → Vidu Q2 (5 pts) or Hailuo 2.0 (12 pts)\n🔥 Most popular image_to_video → Wan 2.6 (25 pts)\nfirst_last_frame / reference → Kling O1 (48 pts)\nUser specifies cheapest → Vidu Q2 (5 pts) — only if explicitly requested"
      },
      {
        "title": "🆕 Special Case: Pixverse Model Parameter (v1.0.7+)",
        "body": "Auto-Inference Logic for Pixverse V5.5/V5/V4:\n\nProblem: Pixverse V5.5, V5, V4 lack model field in form_config from Product List API\nBackend Requirement: Backend requires model parameter (e.g., \"v5.5\", \"v5\", \"v4\")\nAuto-Fix: System automatically extracts version from model_name and injects it\n\nExample: model_name: \"Pixverse V5.5\" → auto-inject model: \"v5.5\"\nExample: model_name: \"Pixverse V4\" → auto-inject model: \"v4\"\n\n\nNote: V4.5 and V3.5 include model in form_config (no auto-inference needed)\nRelevant Task Types: All video modes (text_to_video, image_to_video, first_last_frame_to_video, reference_image_to_video)\n\nError Prevention:\n\nWithout auto-inference: err_code=400017 err_msg=Invalid value for model\nWith auto-inference (v1.0.7+): Pixverse V5.5/V5/V4 work seamlessly ✅\n\nWhy This Matters:\nSome Pixverse models (V5.5/V5/V4) have inconsistent form_config in the Product List API response. The auto-inference ensures all Pixverse versions work correctly without requiring users to manually specify the model parameter."
      },
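      {
        "title": "Sketch: Pixverse model parameter inference (illustrative)",
        "body": "A minimal sketch of the auto-inference described above, assuming form_defaults was built from form_config as elsewhere in this document. The function name is hypothetical; the bundled script implements this internally (v1.0.7+).\n\ndef infer_pixverse_model_param(model_name: str, form_defaults: dict) -> dict:\n    # e.g. model_name \"Pixverse V5.5\" → form_defaults[\"model\"] = \"v5.5\"\n    if model_name.lower().startswith(\"pixverse\") and \"model\" not in form_defaults:\n        version = model_name.split()[-1]  # \"V5.5\"\n        if version.upper().startswith(\"V\"):\n            form_defaults[\"model\"] = version.lower()\n    return form_defaults"
      },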
      {
        "title": "💬 User Experience Protocol (IM / Feishu / Discord)",
        "body": "Video generation takes 1~6 minutes. Never let users wait in silence.\nAlways follow all 4 steps below, every single time."
      },
      {
        "title": "🚫 Never Say to Users",
        "body": "❌ Never say✅ What users care aboutima_video_create.py / 脚本 / script—自动化脚本 / automation—自动处理产品列表 / 查询接口—自动解析参数 / 智能轮询—attribute_id / model_version / form_config—API 调用 / HTTP 请求 / 任何技术参数名—\n\nOnly tell users: model name · estimated time · credits · result URL · plain-language status."
      },
      {
        "title": "Estimated Generation Time per Model",
        "body": "ModelEstimated TimePoll EverySend Progress EveryWan 2.6 (t2v / i2v)60~120s8s30sHailuo 2.060~120s8s30sHailuo 2.360~120s8s30sVidu Q1 / Q260~120s8s30sPixverse V3.5~V5.560~120s8s30sKling 1.660~120s8s30sKling 2.1 Master90~180s8s40sSeeDance 1.0 / 1.5 Pro90~180s8s40sGoogle Veo 3.1 Fast90~180s8s40sKling 2.5 Turbo120~240s8s45sSora 2120~240s8s45sWan 2.590~180s8s40sKling 2.6120~240s8s45sKling O1180~360s8s60sSora 2 Pro180~360s8s60sGoogle Veo 3.1120~300s8s50sGoogle Veo 3.0180~360s8s60s\n\nestimated_max_seconds = upper bound of the range (e.g. 180 for Kling 2.1 Master, 360 for Kling O1)."
      },
      {
        "title": "Step 1 — Pre-Generation Notification (with Cost Transparency)",
        "body": "Before calling the create API, send this message immediately:\n\n🎬 开始生成视频，请稍候…\n• 模型：[Model Name]\n• 预计耗时：[X ~ Y 秒]（约 [X/60 ~ Y/60] 分钟）\n• 消耗积分：[N pts]\n\n视频生成需要一定时间，我会每隔一段时间汇报进度 🙏\n\nCost transparency (critical for video):\n\nFor balanced/default models (25 pts): \"使用 Wan 2.6（25 积分，最新 Wan）\"\nFor premium models (>50 pts):\n\nIf auto-selected: \"使用 Wan 2.6（25 积分）。若需更高质量可选 Kling 2.1 Master（150 积分）\"\nIf user explicit: \"使用高端模型 Kling 2.1 Master（150 积分），质量最佳\"\n\n\nFor budget (user explicit): \"使用 Vidu Q2（5 积分，最省钱选项）\"\n\nAdapt language to match the user. For expensive models (>50 pts), always mention cheaper alternatives unless user explicitly requested premium quality.\n\nAdapt language to match the user. English → 🎬 Starting video generation, this may take [X~Y] seconds. I'll update you on progress…"
      },
      {
        "title": "Step 2 — Progress Updates",
        "body": "Poll the task detail API every 8s.\nSend a progress update message every [Send Progress Every] seconds per the table above.\n\n⏳ 视频生成中… [P]%\n已等待 [elapsed]s，预计最长 [max]s\n\nProgress formula:\n\nP = min(95, floor(elapsed_seconds / estimated_max_seconds * 100))\n\nCap at 95% — never show 100% until the API returns success\nIf elapsed > estimated_max: keep P at 95% and append 「快了，稍等一下…」\nExample: elapsed=120s, max=180s → P = min(95, floor(120/180*100)) = min(95, 66) = 66%\nExample: elapsed=200s, max=180s → P = 95%（冻结 + 「快了，稍等一下…」）"
      },
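      {
        "title": "Sketch: progress percentage (illustrative)",
        "body": "A minimal sketch of the progress formula from Step 2, matching its two worked examples. The function name is hypothetical.\n\nimport math\n\ndef progress_percent(elapsed_seconds: float, estimated_max_seconds: float) -> int:\n    # Cap at 95 — never show 100% until the API returns success\n    return min(95, math.floor(elapsed_seconds / estimated_max_seconds * 100))\n\nassert progress_percent(120, 180) == 66\nassert progress_percent(200, 180) == 95  # frozen + 「快了，稍等一下…」"
      },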
      {
        "title": "Step 3 — Success Notification (Push video via message tool)",
        "body": "When task status = success:\n\n3.1 Send video player first (Feishu will render inline player):\n\n# Get result URL from script output or task detail API\nresult = get_task_result(task_id)\nvideo_url = result[\"medias\"][0][\"url\"]\n\n# Build caption\ncaption = f\"\"\"✅ 视频生成成功！\n• 模型：[Model Name]\n• 耗时：预计 [X~Y]s，实际 [actual]s\n• 消耗积分：[N pts]\n\n[视频描述]\"\"\"\n\n# Add mismatch hint if user pref conflicts with knowledge-ai recommendation\nif user_pref_exists and knowledge_recommended_model != used_model:\n    caption += f\"\"\"\n\n💡 提示：当前任务也许用 {knowledge_recommended_model} 也会不错（{reason}，{cost} pts）\"\"\"\n\n# Send video with caption\nmessage(\n    action=\"send\",\n    media=video_url,  # ⚠️ Use HTTPS URL directly, NOT local file path\n    caption=caption\n)\n\nMismatch hint example:\n\n✅ 视频生成成功！\n• 模型：Midjourney（你的偏好模型）\n• 耗时：45s\n• 消耗积分：8 pts\n\n💡 提示：当前任务也许用 Wan 2.6 也会不错（写实风格更合适，25 pts）\n\n[视频]\n\nImportant:\n\nHint is non-intrusive — does NOT interrupt generation\nOnly shown when user pref conflicts with knowledge-ai recommendation\nUser can ignore the hint; video is already delivered\n\n3.2 Then send link as text (for copying/sharing):\n\n# Send link message immediately after\nmessage(\n    action=\"send\",\n    message=f\"\"\"🔗 视频链接（方便复制分享）：\n{video_url}\"\"\"\n)\n\nCritical:\n\nUse the remote HTTPS URL directly as media parameter. Do NOT download to local file first.\nSend video first (for inline playback), then send link text (for copying/sharing).\n\nFor Feishu: Direct video URL → inline video player with play button. Local file path → file attachment (📎 path)."
      },
      {
        "title": "Step 4 — Failure Notification",
        "body": "When task status = failed or any API/network error, send:\n\n❌ 视频生成失败\n• 原因：[natural_language_error_message]\n• 建议改用：\n  - [Alt Model 1]（[特点]，[N pts]）\n  - [Alt Model 2]（[特点]，[N pts]）\n\n需要我帮你用其他模型重试吗？\n\n⚠️ CRITICAL: Error Message Translation\n\nNEVER show technical error messages to users. Always translate API errors into natural language.\nAPI key & credits: 密钥与积分管理入口为 imaclaw.ai（与 imastudio.com 同属 IMA 平台）。Key and subscription management: imaclaw.ai (same IMA platform as imastudio.com).\n\nTechnical Error❌ Never Say✅ Say Instead (Chinese)✅ Say Instead (English)401 Unauthorized 🆕Invalid API key / 401 Unauthorized❌ API密钥无效或未授权<br>💡 生成新密钥: https://www.imaclaw.ai/imaclaw/apikey❌ API key is invalid or unauthorized<br>💡 Generate API Key: https://www.imaclaw.ai/imaclaw/apikey4008 Insufficient points 🆕Insufficient points / Error 4008❌ 积分不足，无法创建任务<br>💡 购买积分: https://www.imaclaw.ai/imaclaw/subscription❌ Insufficient points to create this task<br>💡 Buy Credits: https://www.imaclaw.ai/imaclaw/subscription\"Invalid product attribute\" / \"Insufficient points\"Invalid product attribute生成参数配置异常，请稍后重试Configuration error, please try again laterError 6006 (credit mismatch)Error 6006积分计算异常，系统正在修复Points calculation error, system is fixingError 6010 (attribute_id mismatch)Attribute ID does not match模型参数不匹配，请尝试其他模型Model parameters incompatible, try another modelerror 400 (bad request)error 400 / Bad request视频参数设置有误，请调整时长或分辨率Video parameter error, adjust duration or resolutionresource_status == 2Resource status 2 / Failed视频生成遇到问题，建议换个模型试试Video generation failed, try another modelstatus == \"failed\" (no details)Task failed这次生成没成功，要不换个模型试试？Generation unsuccessful, try a different model?timeoutTask timed out / Timeout error视频生成时间过长已超时，建议用更快的模型Video generation took too long, try a faster modelNetwork error / Connection refusedConnection refused / Network error网络连接不稳定，请检查网络后重试Network connection unstable, check network and retryRate limit exceeded429 Too Many Requests / Rate limit请求过于频繁，请稍等片刻再试Too many requests, please wait a momentPrompt moderation (Sora 2 Pro only)Content policy violation提示词包含敏感内容（如人物），Sora 不支持，请换其他模型Prompt contains restricted content (e.g. 
people), Sora doesn't support it, try another modelModel unavailableModel not available / 503 Service Unavailable当前模型暂时不可用，建议换个模型Model temporarily unavailable, try another modelImage upload failed (image_to_video only)Image upload error输入图片处理失败，请检查图片格式或换张图Input image processing failed, check format or try another imageDuration/resolution not supportedParameter not supported该模型不支持此时长或分辨率，请调整参数Model doesn't support this duration or resolution, adjust parameters\n\nGeneric fallback (when error is unknown):\n\nChinese: 视频生成遇到问题，请稍后重试或换个模型试试\nEnglish: Video generation encountered an issue, please try again or use another model\n\nBest Practices:\n\nFocus on user action: Tell users what to do next, not what went wrong technically\nBe reassuring: Use phrases like \"建议换个模型试试\" instead of \"生成失败了\"\nAvoid blame: Never say \"你的提示词有问题\" → say \"提示词需要调整一下\"\nProvide alternatives: Always suggest 1-2 alternative models in the failure message\nVideo-specific:\n\nFor Sora content policy errors, recommend Wan 2.6 or Kling O1 (more permissive)\nFor timeout errors, recommend faster models (Vidu Q2, Hailuo 2.0)\nFor image input errors, suggest checking image format (HTTPS URL, valid JPEG/PNG)\n\n\n🆕 Include actionable links (v1.0.8+): For 401/4008 errors, provide clickable links to API key generation or credit purchase pages\n\n🆕 Enhanced Error Handling (v1.0.8):\n\nThe Reflection mechanism (3 automatic retries) now provides specific, actionable suggestions for common errors:\n\n401 Unauthorized: System suggests generating a new API key with clickable link\n4008 Insufficient Points: System suggests purchasing credits with clickable link\n500 Internal Server Error: Automatic parameter degradation (resolution: 1080P → 720P → 540P, duration: 15 → 10 → 5)\n6009 No Rule Match: Automatic parameter completion from credit_rules\n6010 Attribute Mismatch: Automatic credit_rule reselection\nTimeout: Helpful info with dashboard link for background task status\n🆕 Pixverse Model Parameter (v1.0.7+): Auto-inference for missing model parameter (V5.5/V5/V4)\n\nAll error handling is automatic and transparent — users receive natural language explanations with next steps.\n\nFailure fallback table:\n\nFailed ModelFirst AltSecond AltKling 2.1 MasterWan 2.6（3pts，速度快）Hailuo 2.0（5pts）Google Veo 3.1Kling 2.1 Master（10pts）Sora 2（42pts）Kling O1Kling 2.1 Master（10pts）Kling 2.5 Turbo（37pts）Wan 2.6Hailuo 2.0（5pts）Kling 1.6（10pts）Sora 2 / ProKling 2.1 Master（10pts）Google Veo 3.1（162pts）SeeDanceKling 2.1 Master（10pts）Wan 2.6（3pts）Any / UnknownWan 2.6（3pts，最稳定）Hailuo 2.0（5pts）"
      },
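      {
        "title": "Sketch: error-to-message lookup (illustrative)",
        "body": "A minimal sketch of the translation table above, covering a subset of codes. The dict and function are hypothetical helpers, not part of the bundled script.\n\nERROR_MESSAGES = {\n    \"401\": \"❌ API密钥无效或未授权 💡 生成新密钥: https://www.imaclaw.ai/imaclaw/apikey\",\n    \"4008\": \"❌ 积分不足，无法创建任务 💡 购买积分: https://www.imaclaw.ai/imaclaw/subscription\",\n    \"6006\": \"积分计算异常，系统正在修复\",\n    \"6010\": \"模型参数不匹配，请尝试其他模型\",\n    \"timeout\": \"视频生成时间过长已超时，建议用更快的模型\",\n}\n\ndef user_facing_error(code) -> str:\n    # Unknown errors get the generic fallback from the table above\n    return ERROR_MESSAGES.get(str(code), \"视频生成遇到问题，请稍后重试或换个模型试试\")"
      },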
      {
        "title": "Supported Models",
        "body": "⚠️ Production Environment: Model availability validated against production API on 2026-02-27."
      },
      {
        "title": "text_to_video (14 models)",
        "body": "Namemodel_idCost RangeResolutionDurationNotesWan 2.6 🌟wan2.6-t2v25-120 pts720P/1080P5-15sBalanced, most popularHailuo 2.3MiniMax-Hailuo-2.332+ pts768P6sLatest HailuoHailuo 2.0MiniMax-Hailuo-025+ pts768P6sBudget friendlyVidu Q2viduq25-70 pts540P-1080P5-10sFast generationSeeDance 1.5 Prodoubao-seedance-1.5-pro20+ pts720P4sLatest SeeDanceSora 2 Prosora-2-pro122+ pts720P+4s+Premium OpenAIKling O1kling-video-o148-120 pts—5-10sLatest Kling, with audioKling 2.6kling-v2-680+ pts—5-10sPrevious Kling genGoogle Veo 3.1veo-3.1-generate-preview70-330 pts720P-4K4-8sSOTA cinematicPixverse V5.5pixverse30+ pts540P-1080P5-8sLatest PixversePixverse V5pixverse25+ pts540P-1080P5-8s—Pixverse V4.5pixverse20+ pts540P-1080P5-8s—Pixverse V4pixverse12+ pts540P-1080P5-8s—Pixverse V3.5pixverse12+ pts540P-1080P5-8s—"
      },
      {
        "title": "image_to_video (14 models)",
        "body": "Namemodel_idCost RangeResolutionDurationNotesWan 2.6 🔥wan2.6-i2v25-120 pts720P/1080P5-15sMost popular i2vHailuo 2.3MiniMax-Hailuo-2.332+ pts768P6sLatest HailuoHailuo 2.0MiniMax-Hailuo-0225+ pts768P6s—Vidu Q2 Providuq2-pro20-70 pts540P-1080P5-10sFast i2vSeeDance 1.5 Prodoubao-seedance-1.5-pro47+ pts720P4sLatest SeeDanceSora 2 Prosora-2-pro122+ pts720P+4s+Premium OpenAIKling O1kling-video-o148-120 pts—5-10sLatest Kling, with audioKling 2.6kling-v2-680+ pts—5-10sPrevious Kling genGoogle Veo 3.1veo-3.1-generate-preview70-330 pts720P-4K4-8sSOTA cinematicPixverse V5.5pixverse24-48 pts540P-1080P5-8sLatest PixversePixverse V5pixverse24-48 pts540P-1080P5-8s—Pixverse V4.5pixverse12-48 pts540P-1080P5-8s—Pixverse V4pixverse12-48 pts540P-1080P5-8s—Pixverse V3.5pixverse12-48 pts540P-1080P5-8s—"
      },
      {
        "title": "first_last_frame_to_video (10 models)",
        "body": "Namemodel_idCost RangeDurationNotesHailuo 2.0MiniMax-Hailuo-025+ pts6sBudget optionVidu Q2 Providuq2-pro20-70 pts5-10sFast generationKling O1 🌟kling-video-o148-120 pts5-10sRecommended defaultKling 2.6kling-v2-680+ pts5-10s—Google Veo 3.1veo-3.1-generate-preview70-330 pts4-8sSOTA qualityPixverse V5.5pixverse24-48 pts5-8sLatest PixversePixverse V5pixverse24-48 pts5-8s—Pixverse V4.5pixverse12-48 pts5-8s—Pixverse V4pixverse12-48 pts5-8s—Pixverse V3.5pixverse12-48 pts5-8s—"
      },
      {
        "title": "reference_image_to_video (9 models)",
        "body": "Namemodel_idCost RangeDurationNotesVidu Q2viduq210-70 pts5-10sFast, cost-effectiveKling O1 🌟kling-video-o148-120 pts5-10sRecommended, strong referenceGoogle Veo 3.1veo-3.1-generate-preview70-330 pts4-8sSOTA cinematicPixverse (generic)pixverse12-48 pts5-8sPixverse basePixverse V5.5pixverse12-48 pts5-8sLatest PixversePixverse V5pixverse12-48 pts5-8s—Pixverse V4.5pixverse12-48 pts5-8s—Pixverse V4pixverse12-48 pts5-8s—Pixverse V3.5pixverse12-48 pts5-8s—\n\nProduction Notes (2026-02-27):\n\n✅ Active models: 14 t2v, 14 i2v, 10 first_last_frame, 9 reference_image\n🔥 Most popular: Wan 2.6 (both t2v and i2v)\n🌟 Recommended defaults: Wan 2.6 (balanced), Kling O1 (premium with audio)"
      },
      {
        "title": "Environment",
        "body": "Base URL: https://api.imastudio.com\n\nRequired/recommended headers for all /open/v1/ endpoints:\n\nHeaderRequiredValueNotesAuthorization✅Bearer ima_your_api_key_hereAPI key authenticationx-app-source✅ima_skillsFixed value — identifies skill-originated requestsx_app_languagerecommendeden / zhProduct label language; defaults to en if omitted\n\nAuthorization: Bearer ima_your_api_key_here\nx-app-source: ima_skills\nx_app_language: en"
      },
      {
        "title": "⚠️ MANDATORY: Always Query Product List First",
        "body": "CRITICAL: You MUST call /open/v1/product/list BEFORE creating any task.\nThe attribute_id field is REQUIRED in the create request. If it is 0 or missing, you get:\n\"Invalid product attribute\" → \"Insufficient points\" → task fails completely.\nNEVER construct a create request from the model table alone. Always fetch the product first."
      },
      {
        "title": "How to get attribute_id",
        "body": "# Step 1: Query product list for the target category\nGET /open/v1/product/list?app=ima&platform=web&category=text_to_video\n# (or image_to_video / first_last_frame_to_video / reference_image_to_video)\n\n# Step 2: Walk the V2 tree to find your model (type=3 leaf nodes only)\nfor group in response[\"data\"]:\n    for version in group.get(\"children\", []):\n        if version[\"type\"] == \"3\" and version[\"model_id\"] == target_model_id:\n            attribute_id  = version[\"credit_rules\"][0][\"attribute_id\"]\n            credit        = version[\"credit_rules\"][0][\"points\"]\n            model_version = version[\"id\"]    # = version_id\n            model_name    = version[\"name\"]\n            form_defaults = {f[\"field\"]: f[\"value\"] for f in version[\"form_config\"]}"
      },
      {
        "title": "Quick Reference: Known attribute_ids",
        "body": "⚠️ Production warning: attribute_id and credit values change frequently. Always call /open/v1/product/list at runtime; table below is pre-queried reference (2026-02-27).\n\nModelTaskmodel_idattribute_idcreditNotesWan 2.6 (720P, 5s)text_to_videowan2.6-t2v205725 ptsDefault, balancedWan 2.6 (1080P, 5s)text_to_videowan2.6-t2v205840 pts—Wan 2.6 (720P, 10s)text_to_videowan2.6-t2v205950 pts—Wan 2.6 (1080P, 10s)text_to_videowan2.6-t2v206080 pts—Wan 2.6 (720P, 15s)text_to_videowan2.6-t2v206175 pts—Wan 2.6 (1080P, 15s)text_to_videowan2.6-t2v2062120 pts—Kling O1 (5s, std)text_to_videokling-video-o1231348 ptsLatest KlingKling O1 (5s, pro)text_to_videokling-video-o1231460 pts—Kling O1 (10s, std)text_to_videokling-video-o1231596 pts—Kling O1 (10s, pro)text_to_videokling-video-o12316120 pts—All othersany—→ query /open/v1/product/list—Always runtime query"
      },
      {
        "title": "Common Mistakes (and resulting errors)",
        "body": "MistakeErrorattribute_id is 0 or missing\"Invalid product attribute\" → Insufficient pointsattribute_id outdated (production changed)Same errors; always query product list firstattribute_id doesn't match parameter combinationError 6010: \"Attribute ID does not match the calculated rule\"prompt at outer level instead of parameters.parameters.promptPrompt ignoredcast missing from inner parametersBilling validation failurecredit wrong / missingError 6006model_name or model_version missingWrong model routing\n\n⚠️ Critical for Google Veo 3.1 and multi-rule models:\n\nModels like Google Veo 3.1 have multiple credit_rules, each with a different attribute_id for different parameter combinations:\n\n720p + 4s + optimized → attribute_id A\n720p + 8s + optimized → attribute_id B\n4K + 4s + high → attribute_id C\n\nThe script automatically selects the correct attribute_id by matching your parameters (duration, resolution, compression_quality, generate_audio) against each rule's attributes. If the match fails, you get error 6010.\n\nFix: The bundled script now checks these video-specific parameters for smart credit_rule selection. Always use the script, not manual API construction."
      },
      {
        "title": "Core Flow",
        "body": "1. GET /open/v1/product/list?app=ima&platform=web&category=<type>\n   → REQUIRED: Get attribute_id, credit, model_version, form_config defaults\n\n[image_to_video / first_last_frame / reference_image tasks only]\n2. Upload input image(s) → get public HTTPS URL(s)\n   → See \"Image Upload\" section below\n\n3. POST /open/v1/tasks/create\n   → Must include: attribute_id, model_name, model_version, credit, cast, prompt (nested!)\n\n4. POST /open/v1/tasks/detail  {task_id: \"...\"}\n   → Poll every 8s until medias[].resource_status == 1\n   → Extract url (mp4) and cover (thumbnail) from completed media\n\nVideo generation is slower than image — poll every 8s and set timeout to 600s."
      },
      {
        "title": "Image Upload (Required for Video Tasks with Image Input)",
        "body": "The IMA Open API does NOT accept raw bytes or base64 images. All input images must be public HTTPS URLs.\n\nScript behavior: --input-images accepts both URLs and local file paths. Local files are automatically uploaded to IMA CDN by the script — no separate upload step needed when calling the script.\n\nFor image_to_video, first_last_frame_to_video, reference_image_to_video: when a user provides an image (local file, base64, or non-public URL), you can pass a local path to the script (it will upload), or upload first in code to get a URL.\n\ndef prepare_image_url(source) -> str:\n    \"\"\"Convert any image source to a public HTTPS URL.\n    \n    - If source is already a public HTTPS URL: return as-is\n    - If source is a local file path or bytes: upload to hosting first\n    \"\"\"\n    if isinstance(source, str) and source.startswith(\"https://\"):\n        return source  # already public, use directly\n\n    # Option 1: IMA OSS (requires OSS credentials)\n    #   objectName = f\"aiagent/src/d/{date}/in/{uuid}.jpg\"\n    #   bucket.put_object(objectName, image_bytes)\n    #   return f\"https://ima.esxscloud.com/{objectName}\"\n\n    # Option 2: Any public image hosting (imgbb example)\n    import base64, requests\n    if isinstance(source, str):\n        with open(source, \"rb\") as f:\n            b64 = base64.b64encode(f.read()).decode()\n    else:\n        b64 = base64.b64encode(source).decode()\n    r = requests.post(\"https://api.imgbb.com/1/upload\",\n                      data={\"key\": IMGBB_API_KEY, \"image\": b64})\n    r.raise_for_status()\n    return r.json()[\"data\"][\"url\"]\n\n# For first_last_frame: prepare both frames\nfirst_url = prepare_image_url(\"/path/to/first.jpg\")\nlast_url  = prepare_image_url(\"/path/to/last.jpg\")\nsrc_img_url = [first_url, last_url]  # index 0 = first, index 1 = last\n\nNote: URLs must be publicly accessible — not localhost, private network, or auth-gated endpoints."
      },
      {
        "title": "Supported Task Types",
        "body": "categoryCapabilityInputtext_to_videoText → Videopromptimage_to_videoImage → Videoprompt + upload_img_srcfirst_last_frame_to_videoFirst+Last Frame → Videoprompt + src_img_url[2]reference_image_to_videoReference Image → Videoprompt + src_img_url[1+]"
      },
      {
        "title": "Detail API status values",
        "body": "FieldTypeValuesresource_statusint or null0=处理中, 1=可用, 2=失败, 3=已删除；null 当作 0statusstring\"pending\", \"processing\", \"success\", \"failed\"\n\nresource_statusstatusAction0 or nullpending / processingKeep polling1success (or completed)Stop when all medias are 1; read url / cover1failedStop, handle error2 / 3anyStop, handle error\n\nImportant: Treat resource_status: null as 0. Stop only when all medias have resource_status == 1. Check status != \"failed\" when rs=1."
      },
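      {
        "title": "Sketch: media status classification (illustrative)",
        "body": "A minimal sketch of the decision table above. The function name is hypothetical; the bundled script and the Python Example below implement the same rules inline.\n\ndef classify_media(media: dict, task_status: str) -> str:\n    rs = media.get(\"resource_status\")\n    rs = 0 if rs is None else rs  # treat null as 0\n    if rs in (2, 3) or task_status == \"failed\":\n        return \"error\"\n    if rs == 1:\n        return \"done\"\n    return \"wait\"  # keep polling"
      },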
      {
        "title": "API 1: Product List",
        "body": "GET /open/v1/product/list?app=ima&platform=web&category=text_to_video\n\nReturns a V2 tree structure: type=2 nodes are model groups, type=3 nodes are versions (leaves). Only type=3 nodes contain credit_rules and form_config.\n\nHow to pick a version:\n\nTraverse nodes to find type=3 leaves\nUse model_id and id (= model_version) from the leaf\nPick credit_rules[].attribute_id matching desired quality\nUse form_config[].value as default parameters values (duration, resolution, aspect_ratio, etc.)"
      },
      {
        "title": "API 2: Create Task",
        "body": "POST /open/v1/tasks/create"
      },
      {
        "title": "text_to_video — Verified ✅",
        "body": "No image input. src_img_url: [], input_images: [].\n\n{\n  \"task_type\": \"text_to_video\",\n  \"enable_multi_model\": false,\n  \"src_img_url\": [],\n  \"parameters\": [{\n    \"attribute_id\":  4838,\n    \"model_id\":      \"wan2.6-t2v\",\n    \"model_name\":    \"Wan 2.6\",\n    \"model_version\": \"wan2.6-t2v\",\n    \"app\":           \"ima\",\n    \"platform\":      \"web\",\n    \"category\":      \"text_to_video\",\n    \"credit\":        25,\n    \"parameters\": {\n      \"prompt\":          \"a puppy dancing happily, sunny meadow\",\n      \"negative_prompt\": \"\",\n      \"prompt_extend\":   false,\n      \"duration\":        5,\n      \"resolution\":      \"1080P\",\n      \"aspect_ratio\":    \"16:9\",\n      \"shot_type\":       \"single\",\n      \"seed\":            -1,\n      \"n\":               1,\n      \"input_images\":    [],\n      \"cast\":            {\"points\": 3, \"attribute_id\": 4838}\n    }\n  }]\n}\n\nVideo-specific fields from form_config: duration (seconds), resolution, aspect_ratio, shot_type, negative_prompt, prompt_extend.\nResponse medias[].cover = first-frame thumbnail JPEG."
      },
      {
        "title": "image_to_video",
        "body": "Input image goes in top-level src_img_url and parameters.input_images:\n\n{\n  \"task_type\": \"image_to_video\",\n  \"enable_multi_model\": false,\n  \"src_img_url\": [\"https://example.com/scene.jpg\"],\n  \"parameters\": [{\n    \"attribute_id\":  \"<from credit_rules>\",\n    \"model_id\":      \"<model_id>\",\n    \"model_name\":    \"<model_name>\",\n    \"model_version\": \"<version_id>\",\n    \"app\":           \"ima\",\n    \"platform\":      \"web\",\n    \"category\":      \"image_to_video\",\n    \"credit\":        \"<points>\",\n    \"parameters\": {\n      \"prompt\":       \"bring this landscape alive\",\n      \"n\":            1,\n      \"input_images\": [\"https://example.com/scene.jpg\"],\n      \"cast\":         {\"points\": \"<points>\", \"attribute_id\": \"<attribute_id>\"}\n    }\n  }]\n}"
      },
      {
        "title": "first_last_frame_to_video",
        "body": "Provide exactly 2 images: index 0 = first frame, index 1 = last frame:\n\n{\n  \"task_type\": \"first_last_frame_to_video\",\n  \"src_img_url\": [\"https://example.com/first.jpg\", \"https://example.com/last.jpg\"],\n  \"parameters\": [{\n    \"category\": \"first_last_frame_to_video\",\n    \"parameters\": {\n      \"prompt\": \"smooth transition\",\n      \"n\": 1,\n      \"input_images\": [\"https://example.com/first.jpg\", \"https://example.com/last.jpg\"],\n      \"cast\": {\"points\": \"<points>\", \"attribute_id\": \"<attribute_id>\"}\n    }\n  }]\n}"
      },
      {
        "title": "reference_image_to_video",
        "body": "Provide 1 or more reference images in src_img_url:\n\n{\n  \"task_type\": \"reference_image_to_video\",\n  \"src_img_url\": [\"https://example.com/ref.jpg\"],\n  \"parameters\": [{\n    \"category\": \"reference_image_to_video\",\n    \"parameters\": {\n      \"prompt\": \"dynamic video based on reference\",\n      \"n\": 1,\n      \"input_images\": [\"https://example.com/ref.jpg\"],\n      \"cast\": {\"points\": \"<points>\", \"attribute_id\": \"<attribute_id>\"}\n    }\n  }]\n}\n\nKey fields:\n\nFieldRequiredDescriptionparameters[].credit✅Must equal credit_rules[].points. Error 6006 if wrong.parameters[].parameters.prompt✅Prompt must be nested here, NOT at top level.parameters[].parameters.cast✅{\"points\": N, \"attribute_id\": N} — mirror of credit.parameters[].parameters.n✅Number of outputs (usually 1).top-level src_img_urlimage tasksImage URL(s); 2 images for first_last_frame.parameters[].parameters.input_imagesimage tasksMust mirror src_img_url.parameters[].parameters.durationtext_to_videoVideo duration in seconds (from form_config).parameters[].parameters.resolutiontext_to_videoe.g. \"1080P\" (from form_config).parameters[].parameters.aspect_ratiotext_to_videoe.g. \"16:9\" (from form_config).\n\nResponse: data.id = task ID for polling."
      },
      {
        "title": "API 3: Task Detail (Poll)",
        "body": "POST /open/v1/tasks/detail\n{\"task_id\": \"<id from create response>\"}\n\nPoll every 8s for video tasks. Completed response:\n\n{\n  \"id\": \"task_abc\",\n  \"medias\": [{\n    \"resource_status\": 1,\n    \"url\":   \"https://cdn.../output.mp4\",\n    \"cover\": \"https://cdn.../cover.jpg\",\n    \"duration_str\": \"5s\",\n    \"format\": \"mp4\"\n  }]\n}\n\nOutput fields: url (mp4), cover (first-frame thumbnail JPEG), duration_str, format."
      },
      {
        "title": "Common Mistakes",
        "body": "MistakeFixPolling too fast for videoUse 8s interval, not 2–3sMissing duration/resolution/aspect_ratioRead defaults from form_configWrong credit valueMust exactly match credit_rules[].points (error 6006)src_img_url and input_images mismatchBoth must contain the same image URL(s)Only 1 image for first_last_frameRequires exactly 2 images (first + last)Placing prompt at param top-levelprompt must be inside parameters[].parameters"
      },
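      {
        "title": "Sketch: pre-flight request validation (illustrative)",
        "body": "A minimal sketch mirroring the mistakes table above. The function is a hypothetical helper, not part of the bundled script; it assumes the create-request shape shown in the API 2 sections.\n\ndef validate_create_body(body: dict) -> list:\n    errors = []\n    p = body[\"parameters\"][0]\n    inner = p[\"parameters\"]\n    if not p.get(\"attribute_id\"):\n        errors.append(\"attribute_id is 0 or missing\")\n    if \"prompt\" not in inner:\n        errors.append(\"prompt must be nested in parameters[].parameters\")\n    if inner.get(\"cast\", {}).get(\"points\") != p.get(\"credit\"):\n        errors.append(\"cast.points must mirror credit (error 6006)\")\n    if body.get(\"src_img_url\", []) != inner.get(\"input_images\", []):\n        errors.append(\"src_img_url and input_images mismatch\")\n    if body[\"task_type\"] == \"first_last_frame_to_video\" and len(body.get(\"src_img_url\", [])) != 2:\n        errors.append(\"first_last_frame requires exactly 2 images\")\n    return errors"
      },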
      {
        "title": "Python Example",
        "body": "import time\nimport requests\n\nBASE_URL = \"https://api.imastudio.com\"\nAPI_KEY  = \"ima_your_key_here\"\nHEADERS  = {\n    \"Authorization\":  f\"Bearer {API_KEY}\",\n    \"Content-Type\":   \"application/json\",\n    \"x-app-source\":   \"ima_skills\",\n    \"x_app_language\": \"en\",\n}\n\n\ndef get_products(category: str) -> list:\n    \"\"\"Returns flat list of type=3 version nodes from V2 tree.\"\"\"\n    r = requests.get(\n        f\"{BASE_URL}/open/v1/product/list\",\n        headers=HEADERS,\n        params={\"app\": \"ima\", \"platform\": \"web\", \"category\": category},\n    )\n    r.raise_for_status()\n    nodes = r.json()[\"data\"]\n    versions = []\n    for node in nodes:\n        for child in node.get(\"children\") or []:\n            if child.get(\"type\") == \"3\":\n                versions.append(child)\n            for gc in child.get(\"children\") or []:\n                if gc.get(\"type\") == \"3\":\n                    versions.append(gc)\n    return versions\n\n\ndef create_video_task(task_type: str, prompt: str, product: dict, src_img_url: list = None, **extra) -> str:\n    \"\"\"Returns task_id. src_img_url: list of image URLs (1+ for image tasks, 2 for first_last_frame).\"\"\"\n    src_img_url = src_img_url or []\n    rule = product[\"credit_rules\"][0]\n    form_defaults = {f[\"field\"]: f[\"value\"] for f in product.get(\"form_config\", []) if f.get(\"value\") is not None}\n\n    nested_params = {\n        \"prompt\": prompt,\n        \"n\":      1,\n        \"input_images\": src_img_url,\n        \"cast\":   {\"points\": rule[\"points\"], \"attribute_id\": rule[\"attribute_id\"]},\n        **form_defaults,\n    }\n    nested_params.update({k: v for k, v in extra.items()\n                          if k in (\"duration\", \"resolution\", \"aspect_ratio\", \"shot_type\",\n                                   \"negative_prompt\", \"prompt_extend\", \"seed\")})\n\n    body = {\n        \"task_type\":          task_type,\n        \"enable_multi_model\": False,\n        \"src_img_url\":        src_img_url,\n        \"parameters\": [{\n            \"attribute_id\":  rule[\"attribute_id\"],\n            \"model_id\":      product[\"model_id\"],\n            \"model_name\":    product[\"name\"],\n            \"model_version\": product[\"id\"],\n            \"app\":           \"ima\",\n            \"platform\":      \"web\",\n            \"category\":      task_type,\n            \"credit\":        rule[\"points\"],\n            \"parameters\":    nested_params,\n        }],\n    }\n    r = requests.post(f\"{BASE_URL}/open/v1/tasks/create\", headers=HEADERS, json=body)\n    r.raise_for_status()\n    return r.json()[\"data\"][\"id\"]\n\n\ndef poll(task_id: str, interval: int = 8, timeout: int = 600) -> dict:\n    deadline = time.time() + timeout\n    while time.time() < deadline:\n        r = requests.post(f\"{BASE_URL}/open/v1/tasks/detail\", headers=HEADERS, json={\"task_id\": task_id})\n        r.raise_for_status()\n        task   = r.json()[\"data\"]\n        medias = task.get(\"medias\", [])\n        if medias:\n            if any(m.get(\"status\") == \"failed\" for m in medias):\n                raise RuntimeError(f\"Task failed: {task_id}\")\n            rs = lambda m: m.get(\"resource_status\") if m.get(\"resource_status\") is not None else 0\n            if any(rs(m) == 2 for m in medias):\n                raise RuntimeError(f\"Task failed: {task_id}\")\n            if all(rs(m) == 1 for m in medias):\n                return task\n        
time.sleep(interval)\n    raise TimeoutError(f\"Task timed out: {task_id}\")\n\n\n# text_to_video (Verified: Wan 2.6, response includes cover thumbnail)\nproducts = get_products(\"text_to_video\")\nwan26    = next(p for p in products if p[\"model_id\"] == \"wan2.6-t2v\")\ntask_id  = create_video_task(\n    \"text_to_video\", \"a puppy dancing happily, sunny meadow\", wan26,\n    duration=5, resolution=\"1080P\", aspect_ratio=\"16:9\",\n    shot_type=\"single\", negative_prompt=\"\", prompt_extend=False, seed=-1,\n)\nresult = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])    # mp4 URL\nprint(result[\"medias\"][0][\"cover\"])  # first-frame thumbnail JPEG\n\n# image_to_video\nproducts = get_products(\"image_to_video\")\ntask_id  = create_video_task(\"image_to_video\", \"bring this landscape alive\", products[0],\n                             src_img_url=[\"https://example.com/scene.jpg\"])\nresult   = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])\n\n# first_last_frame_to_video (exactly 2 images required)\nproducts = get_products(\"first_last_frame_to_video\")\nframes   = [\"https://example.com/first.jpg\", \"https://example.com/last.jpg\"]\ntask_id  = create_video_task(\"first_last_frame_to_video\", \"smooth transition\", products[0],\n                             src_img_url=frames)\nresult   = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])\n\n# reference_image_to_video\nproducts = get_products(\"reference_image_to_video\")\ntask_id  = create_video_task(\"reference_image_to_video\", \"dynamic video\", products[0],\n                             src_img_url=[\"https://example.com/ref.jpg\"])\nresult   = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])"
      },
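      {
        "title": "Credit Rule Matching & Pixverse Auto-Inference (Sketch)",
        "body": "The skill body describes two mechanisms without showing code: smart credit_rule selection for multi-rule models (e.g. Google Veo 3.1, where a wrong pick triggers error 6010) and the Pixverse model parameter auto-inference (v1.0.7+). The Python below is an illustrative sketch only; the authoritative logic lives in scripts/ima_video_create.py, and the per-rule \"attributes\" field name is an assumption to verify against a live /open/v1/product/list response.\n\nimport re\n\n\ndef pick_credit_rule(product: dict, wanted: dict) -> dict:\n    \"\"\"Sketch: return the credit_rule whose declared attributes agree with wanted.\n\n    wanted holds requested parameters, e.g. {\"duration\": 8, \"resolution\": \"720P\"}.\n    The \"attributes\" field name is assumed; inspect the real product schema.\n    \"\"\"\n    for rule in product[\"credit_rules\"]:\n        attrs = rule.get(\"attributes\") or {}\n        # Match when every parameter the rule declares equals the requested value.\n        if all(attrs.get(k) in (None, v) for k, v in wanted.items()):\n            return rule\n    # Fallback mirrors the documented failure mode: a bad pick yields error 6010.\n    return product[\"credit_rules\"][0]\n\n\ndef infer_pixverse_model(model_name: str, form_defaults: dict) -> dict:\n    \"\"\"Sketch: inject the model parameter Pixverse V5.5/V5/V4 omit from form_config.\n\n    \"Pixverse V5.5\" -> model \"v5.5\"; V4.5/V3.5 already ship model and stay untouched.\n    \"\"\"\n    if \"model\" not in form_defaults:\n        m = re.search(r\"[Vv](\\d+(?:\\.\\d+)?)$\", model_name.strip())\n        if m:\n            form_defaults[\"model\"] = f\"v{m.group(1)}\"\n    return form_defaults\n\n\n# Usage sketch: the chosen rule feeds attribute_id / credit / cast in the create payload.\n# rule = pick_credit_rule(veo_product, {\"duration\": 8, \"resolution\": \"720P\"})\n# infer_pixverse_model(\"Pixverse V5.5\", {})  # -> {\"model\": \"v5.5\"}"
      },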
      {
        "title": "Supported Models & Search Terms",
        "body": "Models: Wan 2.6, Kling O1, Kling 2.6, Google Veo 3.1, Sora 2 Pro, Pixverse V5.5, Hailuo 2.0, Hailuo 2.3, MiniMax Hailuo, SeeDance 1.5 Pro, Vidu Q2\n\nCapabilities: video generation, text-to-video, image-to-video, AI video, character animation, product demo, social media clips, storytelling, explainer video"
      }
    ],
    "body": "IMA Video AI Creation\n📋 Before you install\nCredentials: This skill requires an IMA API key at runtime (IMA_API_KEY or --api-key). The key is sent to api.imastudio.com (main API) and imapi.liveme.com (image uploads). Obtain keys at https://imastudio.com. Use a scoped or test key if you want to limit exposure.\nLocal files: The skill reads local image files you provide (for image-to-video); it also writes logs under ~/.openclaw/logs/ima_skills/ and preferences to ~/.openclaw/memory/ima_prefs.json. Do not point it at sensitive paths.\nCross-skill reads: If ima-knowledge-ai is installed, this skill instructs the agent to read that skill's reference files (~/.openclaw/skills/ima-knowledge-ai/references/*) for workflow and visual-consistency guidance. If you do not have or trust that skill, skip those steps and use this skill's built-in defaults and tables.\n⚠️ 重要：模型 ID 参考\n\nCRITICAL: When calling the script, you MUST use the exact model_id (second/third column), NOT the friendly model name. Do NOT infer model_id from the friendly name.\n\nQuick Reference Table:\n\n友好名称 (Friendly Name)\tmodel_id (t2v)\tmodel_id (i2v)\t说明 (Notes)\nWan 2.6\twan2.6-t2v\twan2.6-i2v\t⚠️ Note -t2v/-i2v suffix\nKling O1\tkling-video-o1\tkling-video-o1\t⚠️ Note video- prefix\nKling 2.6\tkling-v2-6\tkling-v2-6\t⚠️ Note v prefix\nHailuo 2.3\tMiniMax-Hailuo-2.3\tMiniMax-Hailuo-2.3\t⚠️ Note MiniMax- prefix\nHailuo 2.0\tMiniMax-Hailuo-02\tMiniMax-Hailuo-02\t⚠️ Note 02 not 2.0\nVidu Q2\tviduq2\tviduq2-pro\t⚠️ Different for t2v/i2v\nGoogle Veo 3.1\tveo-3.1-generate-preview\tveo-3.1-generate-preview\t⚠️ Note -generate-preview suffix\nSora 2 Pro\tsora-2-pro\tsora-2-pro\t✅ Straightforward\nPixverse\tpixverse\tpixverse\t✅ Same as friendly name\nSeeDance 1.5 Pro\tdoubao-seedance-1.5-pro\tdoubao-seedance-1.5-pro\t⚠️ Note doubao- prefix\n\nUser Input Variations Handled by Agent:\n\n\"万\" / \"万2.6\" / \"Wan\" → Wan 2.6 → wan2.6-t2v / wan2.6-i2v\n\"可灵\" / \"可灵O1\" / \"Kling O1\" → kling-video-o1\n\"可灵2.6\" / \"Kling 2.6\" → kling-v2-6\n\"海螺\" / \"海螺2.3\" / \"Hailuo\" → MiniMax-Hailuo-2.3\n\"Veo\" / \"Google Veo\" → veo-3.1-generate-preview\n\nHow to get the correct model_id:\n\nCheck this table first\nUse --list-models --task-type text_to_video (or image_to_video)\nRefer to command examples below\n\nExample:\n\n# ❌ WRONG: Inferring from friendly name\n--model-id kling-o1\n\n# ✅ CORRECT: Using exact model_id from table\n--model-id kling-video-o1\n\n⚠️ MANDATORY PRE-CHECK: Read Knowledge Base First!\n\nIf ima-knowledge-ai is not installed: Skip all \"Read …\" steps below; use only this SKILL's default models and the 📥 User Input Parsing tables for task_type, model_id, and parameters.\n\nBEFORE executing ANY video generation task, you MUST:\n\nCRITICAL: Understand video modes — Read ima-knowledge-ai/references/video-modes.md:\n\nimage_to_video = first frame to video (输入图成为第1帧)\nreference_image_to_video = reference appearance to video (输入图是视觉参考，不是第1帧)\nThese are COMPLETELY DIFFERENT concepts!\nWrong mode choice = wrong result\n\nCheck for visual consistency needs — Read ima-knowledge-ai/references/visual-consistency.md if:\n\nUser mentions: \"系列\"、\"分镜\"、\"同一个\"、\"角色\"、\"续\"、\"多个镜头\"\nTask involves: multi-shot videos, character continuity, scene consistency\nSecond+ request about same subject (e.g., \"旺财在游泳\" after \"生成旺财照片\")\n\nCheck workflow/model/parameters — Read relevant ima-knowledge-ai/references/ sections if:\n\nComplex multi-step video production\nUnsure which model to use\nNeed parameter guidance (duration, 
resolution, reference strength)\n\nWhy this matters:\n\nAI video generation defaults to 独立生成 (independent generation) each time\nWithout reference images, \"same character/scene\" will look completely different\nText-to-video CANNOT maintain visual consistency — must use image-based modes\n\nExample failure case:\n\nUser: \"生成一只小狗，叫旺财\" \n  → You: generate dog image A\n\nUser: \"生成旺财在游泳的视频\"\n  → ❌ Wrong: text_to_video \"狗在游泳\" (new dog, different from A)\n  → ✅ Right: read visual-consistency.md + video-modes.md → \n             use image_to_video with image A as first frame\n\n\nHow to check:\n\n# Step 1: Read knowledge base\nread(\"~/.openclaw/skills/ima-knowledge-ai/references/video-modes.md\")\nread(\"~/.openclaw/skills/ima-knowledge-ai/references/visual-consistency.md\")\n\n# Step 2: Identify if reference image needed\nif \"same subject\" or \"series\" or \"character continuity\":\n    # Use image-based mode with previous result as reference\n    reference_image = previous_generation_result\n    \n    # Choose mode based on requirement\n    if \"reference becomes first frame\":\n        use_image_to_video(prompt, reference_image)\n    else:\n        use_reference_image_to_video(prompt, reference_image, reference_strength=0.8)\nelse:\n    # OK to use text-to-video\n    use_text_to_video(prompt)\n\n\nNo exceptions — if you skip this check and generate visually inconsistent results, that's a bug.\n\n📥 User Input Parsing (Model & Parameter Recognition)\n\nPurpose: So that any agent (Claude or other models) parses user intent consistently, follow these rules when deriving task_type, model_id, and parameters from natural language. Do not guess — normalize first, then map.\n\n1. User phrasing → task_type\nUser intent / phrasing\ttask_type\tNotes\nOnly text, no image\ttext_to_video\t\"生成一段…视频\" / \"text to video\"\nOne image as first frame (图成为第1帧)\timage_to_video\t\"把这张图动起来\" / \"用这张图做视频\" / \"图生视频\"\nOne image as reference (视觉参考，非第1帧)\treference_image_to_video\t\"参考这张图生成\" / \"像这张风格/角色\"\nTwo images (start + end)\tfirst_last_frame_to_video\t\"首帧+尾帧\" / \"从A过渡到B\"\n\nWhen in doubt: \"把图动起来\" / \"图动\" → image_to_video; \"参考这张图\" / \"按这张风格\" → reference_image_to_video.\n\n2. Model name / alias → model_id (normalize then lookup)\n\nNormalize user wording (case-insensitive, ignore spaces), then map to model_id:\n\nUser says (examples)\tFor t2v → model_id\tFor i2v → model_id\n万 / Wan / 万2.6 / wan2.6\twan2.6-t2v\twan2.6-i2v\n可灵 / Kling / Kling O1 / 可灵O1\tkling-video-o1\tkling-video-o1\nKling 2.6 / 可灵2.6\tkling-v2-6\tkling-v2-6\n海螺 / Hailuo / 海螺2.3\tMiniMax-Hailuo-2.3\tMiniMax-Hailuo-2.3\nHailuo 2.0 / 海螺2.0\tMiniMax-Hailuo-02\tMiniMax-Hailuo-02\nVidu / Vidu Q2\tviduq2\t(i2v: viduq2-pro for \"Vidu Q2 Pro\")\nVeo / Google Veo / Veo 3.1\tveo-3.1-generate-preview\tveo-3.1-generate-preview\nSora / Sora 2 Pro\tsora-2-pro\tsora-2-pro\nPixverse / Pixverse V5.5\tpixverse\tpixverse\n最便宜 / 最省钱 / cheapest / budget\tviduq2 (5 pts)\twan2.6-i2v or per product list\n最好 / 最高质量 / best / premium\tPrefer Kling O1 / Veo 3.1\tSame\n\nIf the user names a model not in the table, match by Name in the \"Supported Models\" tables below and use its model_id for the chosen task_type.\n\n3. 
User phrasing → duration / resolution / aspect_ratio\nUser says (examples)\tParameter\tNormalized value\tFallback if unsupported\n5秒 / 5s / 5 second\tduration\t5\t—\n10秒 / 10s\tduration\t10\t—\n15秒 / 15s\tduration\t15\t—\n1分钟 / 1 min\tduration\t—\tUse 15 if model max is 15s; tell user \"当前最长15秒\"\n横屏 / 16:9 / 横向\taspect_ratio\t16:9\t—\n竖屏 / 9:16 / 竖向\taspect_ratio\t9:16\t—\n1:1 / 方形\taspect_ratio\t1:1\t—\n720P / 720p\tresolution\t720P\t—\n1080P / 1080p / 高清\tresolution\t1080P\t—\n4K / 4k\tresolution\t4K\tOnly if model supports (e.g. Veo 3.1)\n\nIf the user does not specify duration/resolution/aspect_ratio, use form_config defaults from the product list for the chosen model (e.g. 5s, 720P or 1080P, 16:9).\n\n⚙️ How This Skill Works\n\nFor transparency: This skill uses a bundled Python script (scripts/ima_video_create.py) to call the IMA Open API. The script:\n\nSends your prompt to IMA's servers (two domains, see below)\nUses --user-id only locally as a key for storing your model preferences\nReturns a video URL when generation is complete\n🌐 Network Endpoints Used\n\nThis skill connects to two domains owned by IMA Studio for complete functionality:\n\nDomain\tPurpose\tWhat's Sent\tAuthentication\napi.imastudio.com\tMain API (task creation, status polling)\tPrompts, model params, task IDs\tBearer token (IMA API key)\nimapi.liveme.com\tImage upload service (OSS token generation)\tImage files (for i2v/ref tasks), IMA API key\tIMA API key + APP_KEY signature\n\nWhy two domains?\n\napi.imastudio.com: IMA's video generation API (handles task orchestration)\nimapi.liveme.com: IMA's media storage infrastructure (handles large file uploads)\nBoth services are owned and operated by IMA Studio\n\nPrivacy implications:\n\nYour IMA API key is sent to both domains for authentication\nImage files are uploaded to imapi.liveme.com to obtain CDN URLs (for image_to_video, first_last_frame_to_video, reference_image_to_video tasks)\nVideo generation happens on api.imastudio.com using the CDN URLs\nFor text_to_video tasks (no image input), only api.imastudio.com is contacted\n\nSecurity verification:\n\n# List all network endpoints in the code:\ngrep -n \"https://\" scripts/ima_video_create.py\n\n# Expected output:\n# 57: DEFAULT_BASE_URL = \"https://api.imastudio.com\"\n# 58: DEFAULT_IM_BASE_URL = \"https://imapi.liveme.com\"\n\n\nIf you're concerned about the two-domain architecture:\n\nReview IMA Studio's privacy policy at https://imastudio.com/privacy\nContact IMA technical support to confirm domain ownership: support@imastudio.com\nUse a test/scoped API key first (see security notice below)\n⚠️ Credential Security Notice\n\nYour IMA API key is sent to TWO domains:\n\napi.imastudio.com — Main video generation API\nimapi.liveme.com — Image upload service (only when using image-to-video tasks)\n\nBoth domains are owned by IMA Studio, but if you're concerned about credential exposure:\n\n✅ Best practices:\n\nUse a test/scoped API key for initial testing (create at https://imastudio.com/api-keys)\nSet a low quota (e.g., 100 credits) for the test key\nRotate your key after testing if needed\nContact IMA support to confirm domain ownership: support@imastudio.com\n\n❌ Do NOT:\n\nUse a production key if you're uncomfortable with the two-domain architecture\nShare your API key with others\nCommit your API key to version control\n\nWhat gets sent to IMA servers:\n\n✅ Your video prompt/description\n✅ Model selection (Wan/Hailuo/Kling/etc.)\n✅ Video parameters (duration, resolution, etc.)\n✅ Image files (for 
image-to-video tasks, uploaded to imapi.liveme.com)\n✅ IMA API key (for authentication to both domains)\n❌ NO user_id (it's only used locally)\n\nWhat's stored locally:\n\n~/.openclaw/memory/ima_prefs.json - Your model preferences (< 1 KB)\n~/.openclaw/logs/ima_skills/ - Generation logs (auto-deleted after 7 days)\nAgent Execution (Internal Reference)\n\nNote for users: You can review the script source at scripts/ima_video_create.py anytime.\nThe agent uses this script to simplify API calls. Network requests go to two IMA Studio domains: api.imastudio.com (API) and imapi.liveme.com (image uploads).\n\nUse the bundled script internally to ensure correct parameter construction:\n\n# Text to video\npython3 {baseDir}/scripts/ima_video_create.py \\\n  --api-key  $IMA_API_KEY \\\n  --task-type text_to_video \\\n  --model-id  wan2.6-t2v \\\n  --prompt   \"a puppy runs across a sunny meadow, cinematic\" \\\n  --user-id  {user_id} \\\n  --output-json\n\n# Image to video\npython3 {baseDir}/scripts/ima_video_create.py \\\n  --api-key      $IMA_API_KEY \\\n  --task-type    image_to_video \\\n  --model-id     wan2.6-i2v \\\n  --prompt       \"camera slowly zooms in\" \\\n  --input-images https://example.com/photo.jpg \\\n  --user-id      {user_id} \\\n  --output-json\n\n\n✅ Local images: --input-images accepts both HTTPS URLs and local file paths. Local files are automatically uploaded to IMA CDN by the script (no need to host them first).\n\n# First-last frame to video\npython3 {baseDir}/scripts/ima_video_create.py \\\n  --api-key      $IMA_API_KEY \\\n  --task-type    first_last_frame_to_video \\\n  --model-id     kling-video-o1 \\\n  --prompt       \"smooth transition\" \\\n  --input-images https://example.com/first.jpg https://example.com/last.jpg \\\n  --user-id      {user_id} \\\n  --output-json\n\n\nThe script outputs JSON — parse it to get the result URL and pass it to the user via the UX protocol messages below.\n\n🚨 CRITICAL: How to send the video to user (Feishu/Discord/IM)\n\n# ✅ CORRECT: Use the remote URL directly\nvideo_url = json_output[\"url\"]\nmessage(\n    action=\"send\",\n    media=video_url,  # Direct HTTPS URL → renders inline video player\n    caption=\"✅ 视频生成成功！\\n• 模型：[Model Name]\\n• 耗时：[X]s\\n• 消耗积分：[N pts]\"\n)\n\n# ❌ WRONG: Download to local file first\n# curl -o /tmp/video.mp4 {video_url}\n# message(media=\"/tmp/video.mp4\")  # Shows as file attachment (📎 path), NOT playable\n\n\nWhy this matters:\n\n✅ Remote URL → Feishu renders inline video player with ▶ button\n❌ Local file path → Feishu shows file attachment (📎 /tmp/...), not playable\n\nAlways use the remote URL directly. Never download the video to local storage.\n\nOverview\n🛡️ Model-Specific Notes\nSora 2 Pro — Content Safety Policy\n\n⚠️ Important: Sora 2 Pro has strict content safety policies (OpenAI policy).\n\nContent Restrictions:\n\n❌ Cannot generate: people, celebrities, IP assets (e.g., Mickey Mouse)\n❌ Strict prompt moderation\n✅ Safe themes: landscapes, abstract patterns, animals, nature scenes\n\nRecommended Prompts:\n\n✅ \"A sunset over mountains\"\n✅ \"Abstract colorful flowing patterns\"\n✅ \"A bird flying through clouds\"\n\nAvoid:\n\n❌ \"A person walking\" (people)\n❌ \"Mickey Mouse dancing\" (IP asset)\n❌ Celebrity names or recognizable figures\n\nIf your prompt is rejected, try using more abstract or nature-focused descriptions.\n\nCall IMA Open API to create AI-generated videos. All endpoints require an ima_* API key. 
The core flow is: query products → create task → poll until done.\n\n🔒 Security & Transparency Policy\n\nThis skill is community-maintained and open for inspection.\n\n✅ What Users CAN Do\n\nFull transparency:\n\n✅ Review all source code: Check scripts/ima_video_create.py and ima_logger.py anytime\n✅ Verify network calls: Network requests go to two IMA Studio domains: api.imastudio.com (API) and imapi.liveme.com (image uploads). See \"🌐 Network Endpoints Used\" section above for full details.\n✅ Inspect local data: View ~/.openclaw/memory/ima_prefs.json and log files\n✅ Control privacy: Delete preferences/logs anytime, or disable file writes (see below)\n\nConfiguration allowed:\n\n✅ Set API key in environment or agent config:\nEnvironment variable: export IMA_API_KEY=ima_your_key_here\nOpenClaw/MCP config: Add IMA_API_KEY to agent's environment configuration\nGet your key at: https://imastudio.com\n✅ Use scoped/test keys: Test with limited API keys, rotate after testing\n✅ Disable file writes: Make prefs/logs read-only or symlink to /dev/null\n\nData control:\n\n✅ View stored data: cat ~/.openclaw/memory/ima_prefs.json\n✅ Delete preferences: rm ~/.openclaw/memory/ima_prefs.json (resets to defaults)\n✅ Delete logs: rm -rf ~/.openclaw/logs/ima_skills/ (auto-cleanup after 7 days anyway)\n⚠️ Advanced Users: Fork & Modify\n\nIf you need to modify this skill for your use case:\n\nFork the repository (don't modify the original)\nUpdate your fork with your changes\nTest thoroughly with limited API keys\nDocument your changes for troubleshooting\n\nNote: Modified skills may break API compatibility or introduce security issues. Official support only covers the unmodified version.\n\n❌ What to AVOID (Security Risks)\n\nActions that could compromise security:\n\n❌ Sharing API keys publicly or in skill files\n❌ Modifying API endpoints to unknown servers\n❌ Disabling SSL/TLS certificate verification\n❌ Logging sensitive user data (prompts, IDs, etc.)\n❌ Bypassing authentication or billing mechanisms\n\nWhy this matters:\n\nAPI Compatibility: Skill logic aligns with IMA Open API schema\nSecurity: Malicious modifications could leak credentials or bypass billing\nSupport: Modified skills may not be supported\nCommunity: Breaking changes affect all users\n📋 Privacy & Data Handling Summary\n\nWhat this skill does with your data:\n\nData Type\tSent to IMA?\tStored Locally?\tUser Control\nVideo prompts\t✅ Yes (required for generation)\t❌ No\tNone (required)\nAPI key\t✅ Yes (authentication header)\t❌ No\tSet via env var\nuser_id (optional CLI arg)\t❌ Never (local preference key only)\t✅ Yes (as prefs file key)\tChange --user-id value\nModel preferences\t❌ No\t✅ Yes (~/.openclaw)\tDelete anytime\nGeneration logs\t❌ No\t✅ Yes (~/.openclaw)\tAuto-cleanup 7 days\n\nPrivacy recommendations:\n\nUse test/scoped API keys for initial testing\nNote: --user-id is never sent to IMA servers - it's only used locally as a key for storing preferences in ~/.openclaw/memory/ima_prefs.json\nReview source code at scripts/ima_video_create.py to verify network calls (search for create_task function)\nRotate API keys after testing or if compromised\n\nGet your IMA API key: Visit https://imastudio.com to register and get started.\n\n🔧 For Skill Maintainers Only\n\nVersion control:\n\nAll changes must go through Git with proper version bumps (semver)\nCHANGELOG.md must document all changes\nProduction deployments require code review\n\nFile checksums (optional):\n\n# Verify skill integrity\nsha256sum SKILL.md 
scripts/ima_video_create.py\n\n\nIf users report issues, verify file integrity first.\n\n🧠 User Preference Memory\n\nUser preferences have highest priority when they exist. But preferences are only saved when users explicitly express model preferences — not from automatic model selection.\n\nStorage: ~/.openclaw/memory/ima_prefs.json\n{\n  \"user_{user_id}\": {\n    \"text_to_video\":              { \"model_id\": \"wan2.6-t2v\",        \"model_name\": \"Wan 2.6\",          \"credit\": 25, \"last_used\": \"...\" },\n    \"image_to_video\":             { \"model_id\": \"wan2.6-i2v\",        \"model_name\": \"Wan 2.6\",          \"credit\": 25, \"last_used\": \"...\" },\n    \"first_last_frame_to_video\":  { \"model_id\": \"kling-video-o1\",    \"model_name\": \"Kling O1\",        \"credit\": 48, \"last_used\": \"...\" },\n    \"reference_image_to_video\":   { \"model_id\": \"kling-video-o1\",    \"model_name\": \"Kling O1\",        \"credit\": 48, \"last_used\": \"...\" }\n  }\n}\n\nModel Selection Flow (Every Generation)\n\nStep 1: Get knowledge-ai recommendation (if installed)\n\nknowledge_recommended_model = read_ima_knowledge_ai()  # e.g., \"Wan 2.6\"\n\n\nStep 2: Check user preference\n\nuser_pref = load_prefs().get(f\"user_{user_id}\", {}).get(task_type)  # e.g., {\"model_id\": \"kling-video-o1\", ...}\n\n\nStep 3: Decide which model to use\n\nif user_pref exists:\n    use_model = user_pref[\"model_id\"]  # Highest priority\nelse:\n    use_model = knowledge_recommended_model or fallback_default\n\n\nStep 4: Check for mismatch (for later hint)\n\nif user_pref exists and knowledge_recommended_model != user_pref[\"model_id\"]:\n    mismatch = True  # Will add hint in success message\n\nWhen to Write (User Explicit Preference ONLY)\n\n✅ Save preference when user explicitly specifies a model:\n\nUser says\tAction\n用XXX / 换成XXX / 改用XXX\tSwitch to model XXX + save as preference\n以后都用XXX / 默认用XXX / always use XXX\tSave + confirm: ✅ 已记住！以后视频生成默认用 [XXX]\n我喜欢XXX / 我更喜欢XXX\tSave as preference\n\n❌ Do NOT save when:\n\nAgent auto-selects from knowledge-ai → not user preference\nAgent uses fallback default → not user preference\nUser says generic quality requests (see \"Clear Preference\" below) → clear preference instead\nWhen to Clear (User Abandons Preference)\n\n🗑️ Clear preference when user wants automatic selection:\n\nUser says\tAction\n用最好的 / 用最合适的 / best / recommended\tClear pref + use knowledge-ai recommendation\n推荐一个 / 你选一个 / 自动选择\tClear pref + use knowledge-ai recommendation\n用默认的 / 用新的\tClear pref + use knowledge-ai recommendation\n试试别的 / 换个试试 (without specific model)\tClear pref + use knowledge-ai recommendation\n重新推荐\tClear pref + use knowledge-ai recommendation\n\nImplementation:\n\ndel prefs[f\"user_{user_id}\"][task_type]\nsave_prefs(prefs)\n\n⭐ Model Selection Priority\n\nSelection flow:\n\nUser preference (if exists) → Highest priority, always respect\nima-knowledge-ai skill (if installed) → Professional recommendation based on task\nFallback defaults → Use table below (only if neither 1 nor 2 exists)\n\nImportant notes:\n\nUser preference is only saved when user explicitly specifies a model (see \"When to Write\" above)\nKnowledge-ai is always consulted (even when user pref exists) to detect mismatches\nWhen mismatch detected → add gentle hint in success message (does NOT interrupt generation)\n\nThe defaults below are FALLBACK only. User preferences have highest priority, then knowledge-ai recommendations.\nAlways default to the newest and most popular model. 
Do NOT default to the cheapest.\n\nTask\tDefault Model\tmodel_id\tversion_id\tCost\tWhy\ntext_to_video\tWan 2.6\twan2.6-t2v\twan2.6-t2v\t25 pts\t🔥 Most popular, balanced cost\ntext_to_video (premium)\tHailuo 2.3\tMiniMax-Hailuo-2.3\tMiniMax-Hailuo-2.3\t38 pts\tHigher quality\ntext_to_video (budget)\tVidu Q2\tviduq2\tviduq2\t5 pts\tLowest cost t2v\nimage_to_video\tWan 2.6\twan2.6-i2v\twan2.6-i2v\t25 pts\t🔥 Most popular i2v, 1080P\nimage_to_video (premium)\tKling 2.6\tkling-v2-6\tkling-v2-6\t40-160 pts\tPremium Kling i2v\nfirst_last_frame_to_video\tKling O1\tkling-video-o1\tkling-video-o1\t48 pts\tNewest Kling reasoning model\nreference_image_to_video\tKling O1\tkling-video-o1\tkling-video-o1\t48 pts\tBest reference fidelity\n\nSelection guide (production credits, sorted by popularity):\n\n🔥 Most popular text-to-video → Wan 2.6 (25 pts, balanced cost & quality)\nPremium text-to-video → Hailuo 2.3 (38 pts, higher quality)\nBudget text-to-video → Vidu Q2 (5 pts) or Hailuo 2.0 (12 pts)\n🔥 Most popular image_to_video → Wan 2.6 (25 pts)\nfirst_last_frame / reference → Kling O1 (48 pts)\nUser specifies cheapest → Vidu Q2 (5 pts) — only if explicitly requested\n🆕 Special Case: Pixverse Model Parameter (v1.0.7+)\n\nAuto-Inference Logic for Pixverse V5.5/V5/V4:\n\nProblem: Pixverse V5.5, V5, V4 lack model field in form_config from Product List API\nBackend Requirement: Backend requires model parameter (e.g., \"v5.5\", \"v5\", \"v4\")\nAuto-Fix: System automatically extracts version from model_name and injects it\nExample: model_name: \"Pixverse V5.5\" → auto-inject model: \"v5.5\"\nExample: model_name: \"Pixverse V4\" → auto-inject model: \"v4\"\nNote: V4.5 and V3.5 include model in form_config (no auto-inference needed)\nRelevant Task Types: All video modes (text_to_video, image_to_video, first_last_frame_to_video, reference_image_to_video)\n\nError Prevention:\n\nWithout auto-inference: err_code=400017 err_msg=Invalid value for model\nWith auto-inference (v1.0.7+): Pixverse V5.5/V5/V4 work seamlessly ✅\n\nWhy This Matters: Some Pixverse models (V5.5/V5/V4) have inconsistent form_config in the Product List API response. The auto-inference ensures all Pixverse versions work correctly without requiring users to manually specify the model parameter.\n\n💬 User Experience Protocol (IM / Feishu / Discord)\n\nVideo generation takes 1~6 minutes. Never let users wait in silence.\nAlways follow all 4 steps below, every single time.\n\n🚫 Never Say to Users\n❌ Never say\t✅ What users care about\nima_video_create.py / 脚本 / script\t—\n自动化脚本 / automation\t—\n自动处理产品列表 / 查询接口\t—\n自动解析参数 / 智能轮询\t—\nattribute_id / model_version / form_config\t—\nAPI 调用 / HTTP 请求 / 任何技术参数名\t—\n\nOnly tell users: model name · estimated time · credits · result URL · plain-language status.\n\nEstimated Generation Time per Model\nModel\tEstimated Time\tPoll Every\tSend Progress Every\nWan 2.6 (t2v / i2v)\t60~120s\t8s\t30s\nHailuo 2.0\t60~120s\t8s\t30s\nHailuo 2.3\t60~120s\t8s\t30s\nVidu Q1 / Q2\t60~120s\t8s\t30s\nPixverse V3.5~V5.5\t60~120s\t8s\t30s\nKling 1.6\t60~120s\t8s\t30s\nKling 2.1 Master\t90~180s\t8s\t40s\nSeeDance 1.0 / 1.5 Pro\t90~180s\t8s\t40s\nGoogle Veo 3.1 Fast\t90~180s\t8s\t40s\nKling 2.5 Turbo\t120~240s\t8s\t45s\nSora 2\t120~240s\t8s\t45s\nWan 2.5\t90~180s\t8s\t40s\nKling 2.6\t120~240s\t8s\t45s\nKling O1\t180~360s\t8s\t60s\nSora 2 Pro\t180~360s\t8s\t60s\nGoogle Veo 3.1\t120~300s\t8s\t50s\nGoogle Veo 3.0\t180~360s\t8s\t60s\n\nestimated_max_seconds = upper bound of the range (e.g. 
180 for Kling 2.1 Master, 360 for Kling O1).\n\nStep 1 — Pre-Generation Notification (with Cost Transparency)\n\nBefore calling the create API, send this message immediately:\n\n🎬 开始生成视频，请稍候…\n• 模型：[Model Name]\n• 预计耗时：[X ~ Y 秒]（约 [X/60 ~ Y/60] 分钟）\n• 消耗积分：[N pts]\n\n视频生成需要一定时间，我会每隔一段时间汇报进度 🙏\n\n\nCost transparency (critical for video):\n\nFor balanced/default models (25 pts): \"使用 Wan 2.6（25 积分，最新 Wan）\"\nFor premium models (>50 pts):\nIf auto-selected: \"使用 Wan 2.6（25 积分）。若需更高质量可选 Kling 2.1 Master（150 积分）\"\nIf user explicit: \"使用高端模型 Kling 2.1 Master（150 积分），质量最佳\"\nFor budget (user explicit): \"使用 Vidu Q2（5 积分，最省钱选项）\"\n\nAdapt language to match the user. For expensive models (>50 pts), always mention cheaper alternatives unless the user explicitly requested premium quality. English example → 🎬 Starting video generation, this may take [X~Y] seconds. I'll update you on progress…\n\nStep 2 — Progress Updates\n\nPoll the task detail API every 8s.\nSend a progress update message every [Send Progress Every] seconds per the table above.\n\n⏳ 视频生成中… [P]%\n已等待 [elapsed]s，预计最长 [max]s\n\n\nProgress formula:\n\nP = min(95, floor(elapsed_seconds / estimated_max_seconds * 100))\n\nCap at 95% — never show 100% until the API returns success\nIf elapsed > estimated_max: keep P at 95% and append 「快了，稍等一下…」\nExample: elapsed=120s, max=180s → P = min(95, floor(120/180*100)) = min(95, 66) = 66%\nExample: elapsed=200s, max=180s → P = 95%（冻结 + 「快了，稍等一下…」）\nStep 3 — Success Notification (Push video via message tool)\n\nWhen task status = success:\n\n3.1 Send video player first (Feishu will render inline player):\n\n# Get result URL from script output or task detail API\nresult = get_task_result(task_id)\nvideo_url = result[\"medias\"][0][\"url\"]\n\n# Build caption\ncaption = f\"\"\"✅ 视频生成成功！\n• 模型：[Model Name]\n• 耗时：预计 [X~Y]s，实际 [actual]s\n• 消耗积分：[N pts]\n\n[视频描述]\"\"\"\n\n# Add mismatch hint if user pref conflicts with knowledge-ai recommendation\nif user_pref_exists and knowledge_recommended_model != used_model:\n    caption += f\"\"\"\n\n💡 提示：当前任务也许用 {knowledge_recommended_model} 也会不错（{reason}，{cost} pts）\"\"\"\n\n# Send video with caption\nmessage(\n    action=\"send\",\n    media=video_url,  # ⚠️ Use HTTPS URL directly, NOT local file path\n    caption=caption\n)\n\n\nMismatch hint example:\n\n✅ 视频生成成功！\n• 模型：Kling O1（你的偏好模型）\n• 耗时：210s\n• 消耗积分：48 pts\n\n💡 提示：当前任务也许用 Wan 2.6 也会不错（写实风格更合适，25 pts）\n\n[视频]\n\n\nImportant:\n\nHint is non-intrusive — does NOT interrupt generation\nOnly shown when user pref conflicts with knowledge-ai recommendation\nUser can ignore the hint; video is already delivered\n\n3.2 Then send link as text (for copying/sharing):\n\n# Send link message immediately after\nmessage(\n    action=\"send\",\n    message=f\"\"\"🔗 视频链接（方便复制分享）：\n{video_url}\"\"\"\n)\n\n\nCritical:\n\nUse the remote HTTPS URL directly as media parameter. Do NOT download to local file first.\nSend video first (for inline playback), then send link text (for copying/sharing).\n\nFor Feishu: Direct video URL → inline video player with play button. Local file path → file attachment (📎 path).\n\nStep 4 — Failure Notification\n\nWhen task status = failed or any API/network error, send:\n\n❌ 视频生成失败\n• 原因：[natural_language_error_message]\n• 建议改用：\n  - [Alt Model 1]（[特点]，[N pts]）\n  - [Alt Model 2]（[特点]，[N pts]）\n\n需要我帮你用其他模型重试吗？\n\n\n⚠️ CRITICAL: Error Message Translation\n\nNEVER show technical error messages to users. 
Always translate API errors into natural language.\nAPI key & credits: 密钥与积分管理入口为 imaclaw.ai（与 imastudio.com 同属 IMA 平台）。Key and subscription management: imaclaw.ai (same IMA platform as imastudio.com).\n\nTechnical Error\t❌ Never Say\t✅ Say Instead (Chinese)\t✅ Say Instead (English)\n401 Unauthorized 🆕\tInvalid API key / 401 Unauthorized\t❌ API密钥无效或未授权<br>💡 生成新密钥: https://www.imaclaw.ai/imaclaw/apikey\t❌ API key is invalid or unauthorized<br>💡 Generate API Key: https://www.imaclaw.ai/imaclaw/apikey\n4008 Insufficient points 🆕\tInsufficient points / Error 4008\t❌ 积分不足，无法创建任务<br>💡 购买积分: https://www.imaclaw.ai/imaclaw/subscription\t❌ Insufficient points to create this task<br>💡 Buy Credits: https://www.imaclaw.ai/imaclaw/subscription\n\"Invalid product attribute\" / \"Insufficient points\"\tInvalid product attribute\t生成参数配置异常，请稍后重试\tConfiguration error, please try again later\nError 6006 (credit mismatch)\tError 6006\t积分计算异常，系统正在修复\tPoints calculation error, system is fixing\nError 6010 (attribute_id mismatch)\tAttribute ID does not match\t模型参数不匹配，请尝试其他模型\tModel parameters incompatible, try another model\nerror 400 (bad request)\terror 400 / Bad request\t视频参数设置有误，请调整时长或分辨率\tVideo parameter error, adjust duration or resolution\nresource_status == 2\tResource status 2 / Failed\t视频生成遇到问题，建议换个模型试试\tVideo generation failed, try another model\nstatus == \"failed\" (no details)\tTask failed\t这次生成没成功，要不换个模型试试？\tGeneration unsuccessful, try a different model?\ntimeout\tTask timed out / Timeout error\t视频生成时间过长已超时，建议用更快的模型\tVideo generation took too long, try a faster model\nNetwork error / Connection refused\tConnection refused / Network error\t网络连接不稳定，请检查网络后重试\tNetwork connection unstable, check network and retry\nRate limit exceeded\t429 Too Many Requests / Rate limit\t请求过于频繁，请稍等片刻再试\tToo many requests, please wait a moment\nPrompt moderation (Sora 2 Pro only)\tContent policy violation\t提示词包含敏感内容（如人物），Sora 不支持，请换其他模型\tPrompt contains restricted content (e.g. 
people), Sora doesn't support it, try another model\nModel unavailable\tModel not available / 503 Service Unavailable\t当前模型暂时不可用，建议换个模型\tModel temporarily unavailable, try another model\nImage upload failed (image_to_video only)\tImage upload error\t输入图片处理失败，请检查图片格式或换张图\tInput image processing failed, check format or try another image\nDuration/resolution not supported\tParameter not supported\t该模型不支持此时长或分辨率，请调整参数\tModel doesn't support this duration or resolution, adjust parameters\n\nGeneric fallback (when error is unknown):\n\nChinese: 视频生成遇到问题，请稍后重试或换个模型试试\nEnglish: Video generation encountered an issue, please try again or use another model\n\nBest Practices:\n\nFocus on user action: Tell users what to do next, not what went wrong technically\nBe reassuring: Use phrases like \"建议换个模型试试\" instead of \"生成失败了\"\nAvoid blame: Never say \"你的提示词有问题\" → say \"提示词需要调整一下\"\nProvide alternatives: Always suggest 1-2 alternative models in the failure message\nVideo-specific:\nFor Sora content policy errors, recommend Wan 2.6 or Kling O1 (more permissive)\nFor timeout errors, recommend faster models (Vidu Q2, Hailuo 2.0)\nFor image input errors, suggest checking image format (HTTPS URL, valid JPEG/PNG)\n🆕 Include actionable links (v1.0.8+): For 401/4008 errors, provide clickable links to API key generation or credit purchase pages\n\n🆕 Enhanced Error Handling (v1.0.8):\n\nThe Reflection mechanism (3 automatic retries) now provides specific, actionable suggestions for common errors:\n\n401 Unauthorized: System suggests generating a new API key with clickable link\n4008 Insufficient Points: System suggests purchasing credits with clickable link\n500 Internal Server Error: Automatic parameter degradation (resolution: 1080P → 720P → 540P, duration: 15 → 10 → 5)\n6009 No Rule Match: Automatic parameter completion from credit_rules\n6010 Attribute Mismatch: Automatic credit_rule reselection\nTimeout: Helpful info with dashboard link for background task status\n🆕 Pixverse Model Parameter (v1.0.7+): Auto-inference for missing model parameter (V5.5/V5/V4)\n\nAll error handling is automatic and transparent — users receive natural language explanations with next steps.\n\nFailure fallback table:\n\nFailed Model\tFirst Alt\tSecond Alt\nKling 2.1 Master\tWan 2.6（25pts，速度快）\tHailuo 2.0（5pts）\nGoogle Veo 3.1\tKling 2.1 Master（10pts）\tSora 2（42pts）\nKling O1\tKling 2.1 Master（10pts）\tKling 2.5 Turbo（37pts）\nWan 2.6\tHailuo 2.0（5pts）\tKling 1.6（10pts）\nSora 2 / Pro\tKling 2.1 Master（10pts）\tGoogle Veo 3.1（162pts）\nSeeDance\tKling 2.1 Master（10pts）\tWan 2.6（25pts）\nAny / Unknown\tWan 2.6（25pts，最稳定）\tHailuo 2.0（5pts）\nSupported Models\n\n⚠️ Production Environment: Model availability validated against production API on 2026-02-27.\n\ntext_to_video (14 models)\nName\tmodel_id\tCost Range\tResolution\tDuration\tNotes\nWan 2.6 🌟\twan2.6-t2v\t25-120 pts\t720P/1080P\t5-15s\tBalanced, most popular\nHailuo 2.3\tMiniMax-Hailuo-2.3\t32+ pts\t768P\t6s\tLatest Hailuo\nHailuo 2.0\tMiniMax-Hailuo-02\t5+ pts\t768P\t6s\tBudget friendly\nVidu Q2\tviduq2\t5-70 pts\t540P-1080P\t5-10s\tFast generation\nSeeDance 1.5 Pro\tdoubao-seedance-1.5-pro\t20+ pts\t720P\t4s\tLatest SeeDance\nSora 2 Pro\tsora-2-pro\t122+ pts\t720P+\t4s+\tPremium OpenAI\nKling O1\tkling-video-o1\t48-120 pts\t—\t5-10s\tLatest Kling, with audio\nKling 2.6\tkling-v2-6\t80+ pts\t—\t5-10s\tPrevious Kling gen\nGoogle Veo 3.1\tveo-3.1-generate-preview\t70-330 pts\t720P-4K\t4-8s\tSOTA cinematic\nPixverse V5.5\tpixverse\t30+ pts\t540P-1080P\t5-8s\tLatest Pixverse\nPixverse 
V5\tpixverse\t25+ pts\t540P-1080P\t5-8s\t—\nPixverse V4.5\tpixverse\t20+ pts\t540P-1080P\t5-8s\t—\nPixverse V4\tpixverse\t12+ pts\t540P-1080P\t5-8s\t—\nPixverse V3.5\tpixverse\t12+ pts\t540P-1080P\t5-8s\t—\nimage_to_video (14 models)\nName\tmodel_id\tCost Range\tResolution\tDuration\tNotes\nWan 2.6 🔥\twan2.6-i2v\t25-120 pts\t720P/1080P\t5-15s\tMost popular i2v\nHailuo 2.3\tMiniMax-Hailuo-2.3\t32+ pts\t768P\t6s\tLatest Hailuo\nHailuo 2.0\tMiniMax-Hailuo-02\t25+ pts\t768P\t6s\t—\nVidu Q2 Pro\tviduq2-pro\t20-70 pts\t540P-1080P\t5-10s\tFast i2v\nSeeDance 1.5 Pro\tdoubao-seedance-1.5-pro\t47+ pts\t720P\t4s\tLatest SeeDance\nSora 2 Pro\tsora-2-pro\t122+ pts\t720P+\t4s+\tPremium OpenAI\nKling O1\tkling-video-o1\t48-120 pts\t—\t5-10s\tLatest Kling, with audio\nKling 2.6\tkling-v2-6\t80+ pts\t—\t5-10s\tPrevious Kling gen\nGoogle Veo 3.1\tveo-3.1-generate-preview\t70-330 pts\t720P-4K\t4-8s\tSOTA cinematic\nPixverse V5.5\tpixverse\t24-48 pts\t540P-1080P\t5-8s\tLatest Pixverse\nPixverse V5\tpixverse\t24-48 pts\t540P-1080P\t5-8s\t—\nPixverse V4.5\tpixverse\t12-48 pts\t540P-1080P\t5-8s\t—\nPixverse V4\tpixverse\t12-48 pts\t540P-1080P\t5-8s\t—\nPixverse V3.5\tpixverse\t12-48 pts\t540P-1080P\t5-8s\t—\nfirst_last_frame_to_video (10 models)\nName\tmodel_id\tCost Range\tDuration\tNotes\nHailuo 2.0\tMiniMax-Hailuo-02\t5+ pts\t6s\tBudget option\nVidu Q2 Pro\tviduq2-pro\t20-70 pts\t5-10s\tFast generation\nKling O1 🌟\tkling-video-o1\t48-120 pts\t5-10s\tRecommended default\nKling 2.6\tkling-v2-6\t80+ pts\t5-10s\t—\nGoogle Veo 3.1\tveo-3.1-generate-preview\t70-330 pts\t4-8s\tSOTA quality\nPixverse V5.5\tpixverse\t24-48 pts\t5-8s\tLatest Pixverse\nPixverse V5\tpixverse\t24-48 pts\t5-8s\t—\nPixverse V4.5\tpixverse\t12-48 pts\t5-8s\t—\nPixverse V4\tpixverse\t12-48 pts\t5-8s\t—\nPixverse V3.5\tpixverse\t12-48 pts\t5-8s\t—\nreference_image_to_video (9 models)\nName\tmodel_id\tCost Range\tDuration\tNotes\nVidu Q2\tviduq2\t10-70 pts\t5-10s\tFast, cost-effective\nKling O1 🌟\tkling-video-o1\t48-120 pts\t5-10s\tRecommended, strong reference\nGoogle Veo 3.1\tveo-3.1-generate-preview\t70-330 pts\t4-8s\tSOTA cinematic\nPixverse (generic)\tpixverse\t12-48 pts\t5-8s\tPixverse base\nPixverse V5.5\tpixverse\t12-48 pts\t5-8s\tLatest Pixverse\nPixverse V5\tpixverse\t12-48 pts\t5-8s\t—\nPixverse V4.5\tpixverse\t12-48 pts\t5-8s\t—\nPixverse V4\tpixverse\t12-48 pts\t5-8s\t—\nPixverse V3.5\tpixverse\t12-48 pts\t5-8s\t—\n\nProduction Notes (2026-02-27):\n\n✅ Active models: 14 t2v, 14 i2v, 10 first_last_frame, 9 reference_image\n🔥 Most popular: Wan 2.6 (both t2v and i2v)\n🌟 Recommended defaults: Wan 2.6 (balanced), Kling O1 (premium with audio)\nEnvironment\n\nBase URL: https://api.imastudio.com\n\nRequired/recommended headers for all /open/v1/ endpoints:\n\nHeader\tRequired\tValue\tNotes\nAuthorization\t✅\tBearer ima_your_api_key_here\tAPI key authentication\nx-app-source\t✅\tima_skills\tFixed value — identifies skill-originated requests\nx_app_language\trecommended\ten / zh\tProduct label language; defaults to en if omitted\nAuthorization: Bearer ima_your_api_key_here\nx-app-source: ima_skills\nx_app_language: en\n\n⚠️ MANDATORY: Always Query Product List First\n\nCRITICAL: You MUST call /open/v1/product/list BEFORE creating any task.\nThe attribute_id field is REQUIRED in the create request. If it is 0 or missing, you get:\n\"Invalid product attribute\" → \"Insufficient points\" → task fails completely.\nNEVER construct a create request from the model table alone. 
Always fetch the product first.\n\nHow to get attribute_id\n# Step 1: Query product list for the target category\nGET /open/v1/product/list?app=ima&platform=web&category=text_to_video\n# (or image_to_video / first_last_frame_to_video / reference_image_to_video)\n\n# Step 2: Walk the V2 tree to find your model (type=3 leaf nodes only)\nfor group in response[\"data\"]:\n    for version in group.get(\"children\", []):\n        if version[\"type\"] == \"3\" and version[\"model_id\"] == target_model_id:\n            attribute_id  = version[\"credit_rules\"][0][\"attribute_id\"]\n            credit        = version[\"credit_rules\"][0][\"points\"]\n            model_version = version[\"id\"]    # = version_id\n            model_name    = version[\"name\"]\n            form_defaults = {f[\"field\"]: f[\"value\"] for f in version[\"form_config\"]}\n\nQuick Reference: Known attribute_ids\n\n⚠️ Production warning: attribute_id and credit values change frequently. Always call /open/v1/product/list at runtime; table below is pre-queried reference (2026-02-27).\n\nModel\tTask\tmodel_id\tattribute_id\tcredit\tNotes\nWan 2.6 (720P, 5s)\ttext_to_video\twan2.6-t2v\t2057\t25 pts\tDefault, balanced\nWan 2.6 (1080P, 5s)\ttext_to_video\twan2.6-t2v\t2058\t40 pts\t—\nWan 2.6 (720P, 10s)\ttext_to_video\twan2.6-t2v\t2059\t50 pts\t—\nWan 2.6 (1080P, 10s)\ttext_to_video\twan2.6-t2v\t2060\t80 pts\t—\nWan 2.6 (720P, 15s)\ttext_to_video\twan2.6-t2v\t2061\t75 pts\t—\nWan 2.6 (1080P, 15s)\ttext_to_video\twan2.6-t2v\t2062\t120 pts\t—\nKling O1 (5s, std)\ttext_to_video\tkling-video-o1\t2313\t48 pts\tLatest Kling\nKling O1 (5s, pro)\ttext_to_video\tkling-video-o1\t2314\t60 pts\t—\nKling O1 (10s, std)\ttext_to_video\tkling-video-o1\t2315\t96 pts\t—\nKling O1 (10s, pro)\ttext_to_video\tkling-video-o1\t2316\t120 pts\t—\nAll others\tany\t—\t→ query /open/v1/product/list\t—\tAlways runtime query\nCommon Mistakes (and resulting errors)\nMistake\tError\nattribute_id is 0 or missing\t\"Invalid product attribute\" → Insufficient points\nattribute_id outdated (production changed)\tSame errors; always query product list first\nattribute_id doesn't match parameter combination\tError 6010: \"Attribute ID does not match the calculated rule\"\nprompt at outer level instead of parameters.parameters.prompt\tPrompt ignored\ncast missing from inner parameters\tBilling validation failure\ncredit wrong / missing\tError 6006\nmodel_name or model_version missing\tWrong model routing\n\n⚠️ Critical for Google Veo 3.1 and multi-rule models:\n\nModels like Google Veo 3.1 have multiple credit_rules, each with a different attribute_id for different parameter combinations:\n\n720p + 4s + optimized → attribute_id A\n720p + 8s + optimized → attribute_id B\n4K + 4s + high → attribute_id C\n\nThe script automatically selects the correct attribute_id by matching your parameters (duration, resolution, compression_quality, generate_audio) against each rule's attributes. If the match fails, you get error 6010.\n\nFix: The bundled script now checks these video-specific parameters for smart credit_rule selection. Always use the script, not manual API construction.\n\nCore Flow\n1. GET /open/v1/product/list?app=ima&platform=web&category=<type>\n   → REQUIRED: Get attribute_id, credit, model_version, form_config defaults\n\n[image_to_video / first_last_frame / reference_image tasks only]\n2. Upload input image(s) → get public HTTPS URL(s)\n   → See \"Image Upload\" section below\n\n3. 
POST /open/v1/tasks/create\n   → Must include: attribute_id, model_name, model_version, credit, cast, prompt (nested!)\n\n4. POST /open/v1/tasks/detail  {task_id: \"...\"}\n   → Poll every 8s until medias[].resource_status == 1\n   → Extract url (mp4) and cover (thumbnail) from completed media\n\n\nVideo generation is slower than image — poll every 8s and set timeout to 600s.\n\nImage Upload (Required for Video Tasks with Image Input)\n\nThe IMA Open API does NOT accept raw bytes or base64 images. All input images must be public HTTPS URLs.\n\nScript behavior: --input-images accepts both URLs and local file paths. Local files are automatically uploaded to IMA CDN by the script — no separate upload step needed when calling the script.\n\nFor image_to_video, first_last_frame_to_video, reference_image_to_video: when a user provides an image (local file, base64, or non-public URL), you can pass a local path to the script (it will upload), or upload first in code to get a URL.\n\ndef prepare_image_url(source) -> str:\n    \"\"\"Convert any image source to a public HTTPS URL.\n    \n    - If source is already a public HTTPS URL: return as-is\n    - If source is a local file path or bytes: upload to hosting first\n    \"\"\"\n    if isinstance(source, str) and source.startswith(\"https://\"):\n        return source  # already public, use directly\n\n    # Option 1: IMA OSS (requires OSS credentials)\n    #   objectName = f\"aiagent/src/d/{date}/in/{uuid}.jpg\"\n    #   bucket.put_object(objectName, image_bytes)\n    #   return f\"https://ima.esxscloud.com/{objectName}\"\n\n    # Option 2: Any public image hosting (imgbb example)\n    import base64, requests\n    if isinstance(source, str):\n        with open(source, \"rb\") as f:\n            b64 = base64.b64encode(f.read()).decode()\n    else:\n        b64 = base64.b64encode(source).decode()\n    r = requests.post(\"https://api.imgbb.com/1/upload\",\n                      data={\"key\": IMGBB_API_KEY, \"image\": b64})\n    r.raise_for_status()\n    return r.json()[\"data\"][\"url\"]\n\n# For first_last_frame: prepare both frames\nfirst_url = prepare_image_url(\"/path/to/first.jpg\")\nlast_url  = prepare_image_url(\"/path/to/last.jpg\")\nsrc_img_url = [first_url, last_url]  # index 0 = first, index 1 = last\n\n\nNote: URLs must be publicly accessible — not localhost, private network, or auth-gated endpoints.\n\nSupported Task Types\ncategory\tCapability\tInput\ntext_to_video\tText → Video\tprompt\nimage_to_video\tImage → Video\tprompt + upload_img_src\nfirst_last_frame_to_video\tFirst+Last Frame → Video\tprompt + src_img_url[2]\nreference_image_to_video\tReference Image → Video\tprompt + src_img_url[1+]\nDetail API status values\nField\tType\tValues\nresource_status\tint or null\t0=处理中, 1=可用, 2=失败, 3=已删除；null 当作 0\nstatus\tstring\t\"pending\", \"processing\", \"success\", \"failed\"\nresource_status\tstatus\tAction\n0 or null\tpending / processing\tKeep polling\n1\tsuccess (or completed)\tStop when all medias are 1; read url / cover\n1\tfailed\tStop, handle error\n2 / 3\tany\tStop, handle error\n\nImportant: Treat resource_status: null as 0. Stop only when all medias have resource_status == 1. Check status != \"failed\" when rs=1.\n\nAPI 1: Product List\nGET /open/v1/product/list?app=ima&platform=web&category=text_to_video\n\n\nReturns a V2 tree structure: type=2 nodes are model groups, type=3 nodes are versions (leaves). 
Only type=3 nodes contain credit_rules and form_config.\n\nHow to pick a version:\n\nTraverse nodes to find type=3 leaves\nUse model_id and id (= model_version) from the leaf\nPick credit_rules[].attribute_id matching desired quality\nUse form_config[].value as default parameter values (duration, resolution, aspect_ratio, etc.)\nAPI 2: Create Task\nPOST /open/v1/tasks/create\n\ntext_to_video — Verified ✅\n\nNo image input. src_img_url: [], input_images: [].\n\n{\n  \"task_type\": \"text_to_video\",\n  \"enable_multi_model\": false,\n  \"src_img_url\": [],\n  \"parameters\": [{\n    \"attribute_id\":  4838,\n    \"model_id\":      \"wan2.6-t2v\",\n    \"model_name\":    \"Wan 2.6\",\n    \"model_version\": \"wan2.6-t2v\",\n    \"app\":           \"ima\",\n    \"platform\":      \"web\",\n    \"category\":      \"text_to_video\",\n    \"credit\":        25,\n    \"parameters\": {\n      \"prompt\":          \"a puppy dancing happily, sunny meadow\",\n      \"negative_prompt\": \"\",\n      \"prompt_extend\":   false,\n      \"duration\":        5,\n      \"resolution\":      \"1080P\",\n      \"aspect_ratio\":    \"16:9\",\n      \"shot_type\":       \"single\",\n      \"seed\":            -1,\n      \"n\":               1,\n      \"input_images\":    [],\n      \"cast\":            {\"points\": 25, \"attribute_id\": 4838}\n    }\n  }]\n}\n\n\nVideo-specific fields from form_config: duration (seconds), resolution, aspect_ratio, shot_type, negative_prompt, prompt_extend. Response medias[].cover = first-frame thumbnail JPEG.\n\nimage_to_video\n\nInput image goes in top-level src_img_url and parameters.input_images:\n\n{\n  \"task_type\": \"image_to_video\",\n  \"enable_multi_model\": false,\n  \"src_img_url\": [\"https://example.com/scene.jpg\"],\n  \"parameters\": [{\n    \"attribute_id\":  \"<from credit_rules>\",\n    \"model_id\":      \"<model_id>\",\n    \"model_name\":    \"<model_name>\",\n    \"model_version\": \"<version_id>\",\n    \"app\":           \"ima\",\n    \"platform\":      \"web\",\n    \"category\":      \"image_to_video\",\n    \"credit\":        \"<points>\",\n    \"parameters\": {\n      \"prompt\":       \"bring this landscape alive\",\n      \"n\":            1,\n      \"input_images\": [\"https://example.com/scene.jpg\"],\n      \"cast\":         {\"points\": \"<points>\", \"attribute_id\": \"<attribute_id>\"}\n    }\n  }]\n}\n\nfirst_last_frame_to_video\n\nProvide exactly 2 images: index 0 = first frame, index 1 = last frame:\n\n{\n  \"task_type\": \"first_last_frame_to_video\",\n  \"src_img_url\": [\"https://example.com/first.jpg\", \"https://example.com/last.jpg\"],\n  \"parameters\": [{\n    \"category\": \"first_last_frame_to_video\",\n    \"parameters\": {\n      \"prompt\": \"smooth transition\",\n      \"n\": 1,\n      \"input_images\": [\"https://example.com/first.jpg\", \"https://example.com/last.jpg\"],\n      \"cast\": {\"points\": \"<points>\", \"attribute_id\": \"<attribute_id>\"}\n    }\n  }]\n}\n\nreference_image_to_video\n\nProvide 1 or more reference images in src_img_url:\n\n{\n  \"task_type\": \"reference_image_to_video\",\n  \"src_img_url\": [\"https://example.com/ref.jpg\"],\n  \"parameters\": [{\n    \"category\": \"reference_image_to_video\",\n    \"parameters\": {\n      \"prompt\": \"dynamic video based on reference\",\n      \"n\": 1,\n      \"input_images\": [\"https://example.com/ref.jpg\"],\n      \"cast\": {\"points\": \"<points>\", \"attribute_id\": \"<attribute_id>\"}\n    }\n  }]\n}\n\n\nKey 
fields:\n\nField\tRequired\tDescription\nparameters[].credit\t✅\tMust equal credit_rules[].points. Error 6006 if wrong.\nparameters[].parameters.prompt\t✅\tPrompt must be nested here, NOT at top level.\nparameters[].parameters.cast\t✅\t{\"points\": N, \"attribute_id\": N} — mirror of credit.\nparameters[].parameters.n\t✅\tNumber of outputs (usually 1).\ntop-level src_img_url\timage tasks\tImage URL(s); 2 images for first_last_frame.\nparameters[].parameters.input_images\timage tasks\tMust mirror src_img_url.\nparameters[].parameters.duration\ttext_to_video\tVideo duration in seconds (from form_config).\nparameters[].parameters.resolution\ttext_to_video\te.g. \"1080P\" (from form_config).\nparameters[].parameters.aspect_ratio\ttext_to_video\te.g. \"16:9\" (from form_config).\n\nResponse: data.id = task ID for polling.\n\nAPI 3: Task Detail (Poll)\nPOST /open/v1/tasks/detail\n{\"task_id\": \"<id from create response>\"}\n\n\nPoll every 8s for video tasks. Completed response:\n\n{\n  \"id\": \"task_abc\",\n  \"medias\": [{\n    \"resource_status\": 1,\n    \"url\":   \"https://cdn.../output.mp4\",\n    \"cover\": \"https://cdn.../cover.jpg\",\n    \"duration_str\": \"5s\",\n    \"format\": \"mp4\"\n  }]\n}\n\n\nOutput fields: url (mp4), cover (first-frame thumbnail JPEG), duration_str, format.\n\nCommon Mistakes\nMistake\tFix\nPolling too fast for video\tUse 8s interval, not 2–3s\nMissing duration/resolution/aspect_ratio\tRead defaults from form_config\nWrong credit value\tMust exactly match credit_rules[].points (error 6006)\nsrc_img_url and input_images mismatch\tBoth must contain the same image URL(s)\nOnly 1 image for first_last_frame\tRequires exactly 2 images (first + last)\nPlacing prompt at param top-level\tprompt must be inside parameters[].parameters\nPython Example\nimport time\nimport requests\n\nBASE_URL = \"https://api.imastudio.com\"\nAPI_KEY  = \"ima_your_key_here\"\nHEADERS  = {\n    \"Authorization\":  f\"Bearer {API_KEY}\",\n    \"Content-Type\":   \"application/json\",\n    \"x-app-source\":   \"ima_skills\",\n    \"x_app_language\": \"en\",\n}\n\n\ndef get_products(category: str) -> list:\n    \"\"\"Returns flat list of type=3 version nodes from V2 tree.\"\"\"\n    r = requests.get(\n        f\"{BASE_URL}/open/v1/product/list\",\n        headers=HEADERS,\n        params={\"app\": \"ima\", \"platform\": \"web\", \"category\": category},\n    )\n    r.raise_for_status()\n    nodes = r.json()[\"data\"]\n    versions = []\n    for node in nodes:\n        for child in node.get(\"children\") or []:\n            if child.get(\"type\") == \"3\":\n                versions.append(child)\n            for gc in child.get(\"children\") or []:\n                if gc.get(\"type\") == \"3\":\n                    versions.append(gc)\n    return versions\n\n\ndef create_video_task(task_type: str, prompt: str, product: dict, src_img_url: list = None, **extra) -> str:\n    \"\"\"Returns task_id. 
src_img_url: list of image URLs (1+ for image tasks, 2 for first_last_frame).\"\"\"\n    src_img_url = src_img_url or []\n    rule = product[\"credit_rules\"][0]\n    form_defaults = {f[\"field\"]: f[\"value\"] for f in product.get(\"form_config\", []) if f.get(\"value\") is not None}\n\n    nested_params = {\n        \"prompt\": prompt,\n        \"n\":      1,\n        \"input_images\": src_img_url,\n        \"cast\":   {\"points\": rule[\"points\"], \"attribute_id\": rule[\"attribute_id\"]},\n        **form_defaults,\n    }\n    nested_params.update({k: v for k, v in extra.items()\n                          if k in (\"duration\", \"resolution\", \"aspect_ratio\", \"shot_type\",\n                                   \"negative_prompt\", \"prompt_extend\", \"seed\")})\n\n    body = {\n        \"task_type\":          task_type,\n        \"enable_multi_model\": False,\n        \"src_img_url\":        src_img_url,\n        \"parameters\": [{\n            \"attribute_id\":  rule[\"attribute_id\"],\n            \"model_id\":      product[\"model_id\"],\n            \"model_name\":    product[\"name\"],\n            \"model_version\": product[\"id\"],\n            \"app\":           \"ima\",\n            \"platform\":      \"web\",\n            \"category\":      task_type,\n            \"credit\":        rule[\"points\"],\n            \"parameters\":    nested_params,\n        }],\n    }\n    r = requests.post(f\"{BASE_URL}/open/v1/tasks/create\", headers=HEADERS, json=body)\n    r.raise_for_status()\n    return r.json()[\"data\"][\"id\"]\n\n\ndef poll(task_id: str, interval: int = 8, timeout: int = 600) -> dict:\n    deadline = time.time() + timeout\n    while time.time() < deadline:\n        r = requests.post(f\"{BASE_URL}/open/v1/tasks/detail\", headers=HEADERS, json={\"task_id\": task_id})\n        r.raise_for_status()\n        task   = r.json()[\"data\"]\n        medias = task.get(\"medias\", [])\n        if medias:\n            if any(m.get(\"status\") == \"failed\" for m in medias):\n                raise RuntimeError(f\"Task failed: {task_id}\")\n            rs = lambda m: m.get(\"resource_status\") if m.get(\"resource_status\") is not None else 0\n            if any(rs(m) == 2 for m in medias):\n                raise RuntimeError(f\"Task failed: {task_id}\")\n            if all(rs(m) == 1 for m in medias):\n                return task\n        time.sleep(interval)\n    raise TimeoutError(f\"Task timed out: {task_id}\")\n\n\n# text_to_video (Verified: Wan 2.6, response includes cover thumbnail)\nproducts = get_products(\"text_to_video\")\nwan26    = next(p for p in products if p[\"model_id\"] == \"wan2.6-t2v\")\ntask_id  = create_video_task(\n    \"text_to_video\", \"a puppy dancing happily, sunny meadow\", wan26,\n    duration=5, resolution=\"1080P\", aspect_ratio=\"16:9\",\n    shot_type=\"single\", negative_prompt=\"\", prompt_extend=False, seed=-1,\n)\nresult = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])    # mp4 URL\nprint(result[\"medias\"][0][\"cover\"])  # first-frame thumbnail JPEG\n\n# image_to_video\nproducts = get_products(\"image_to_video\")\ntask_id  = create_video_task(\"image_to_video\", \"bring this landscape alive\", products[0],\n                             src_img_url=[\"https://example.com/scene.jpg\"])\nresult   = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])\n\n# first_last_frame_to_video (exactly 2 images required)\nproducts = get_products(\"first_last_frame_to_video\")\nframes   = [\"https://example.com/first.jpg\", 
\"https://example.com/last.jpg\"]\ntask_id  = create_video_task(\"first_last_frame_to_video\", \"smooth transition\", products[0],\n                             src_img_url=frames)\nresult   = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])\n\n# reference_image_to_video\nproducts = get_products(\"reference_image_to_video\")\ntask_id  = create_video_task(\"reference_image_to_video\", \"dynamic video\", products[0],\n                             src_img_url=[\"https://example.com/ref.jpg\"])\nresult   = poll(task_id)\nprint(result[\"medias\"][0][\"url\"])\n\nSupported Models & Search Terms\n\nModels: Wan 2.6, Kling O1, Kling 2.6, Google Veo 3.1, Sora 2 Pro, Pixverse V5.5, Hailuo 2.0, Hailuo 2.3, MiniMax Hailuo, SeeDance 1.5 Pro, Vidu Q2\n\nCapabilities: video generation, text-to-video, image-to-video, AI video, character animation, product demo, social media clips, storytelling, explainer video"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/allenfancy-gan/ima-video-ai",
    "publisherUrl": "https://clawhub.ai/allenfancy-gan/ima-video-ai",
    "owner": "allenfancy-gan",
    "version": "1.0.6",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/ima-video-ai",
    "downloadUrl": "https://openagent3.xyz/downloads/ima-video-ai",
    "agentUrl": "https://openagent3.xyz/skills/ima-video-ai/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ima-video-ai/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ima-video-ai/agent.md"
  }
}