{
  "schemaVersion": "1.0",
  "item": {
    "slug": "models",
    "name": "Models",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/ivangdavila/models",
    "canonicalUrl": "https://clawhub.ai/ivangdavila/models",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/models",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=models",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "slug": "models",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-02T11:59:41.944Z",
      "expiresAt": "2026-05-09T11:59:41.944Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=models",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=models",
        "contentDisposition": "attachment; filename=\"models-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null,
        "slug": "models"
      },
      "scope": "item",
      "summary": "Item download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this item.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/models"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/models",
    "agentPageUrl": "https://openagent3.xyz/skills/models/agent",
    "manifestUrl": "https://openagent3.xyz/skills/models/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/models/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Core Principle",
        "body": "No single model is best for everything — match model to task, not brand loyalty\nA $0.75/M model often performs identically to a $40/M model for simple tasks\nTest cheaper alternatives before committing to expensive defaults"
      },
      {
        "title": "Cost Reality",
        "body": "Output tokens cost 3-10x more than input tokens — advertised input prices are misleading\nCalculate real cost with your actual input/output ratio, not theoretical pricing\nBatch/async APIs offer 50% discounts — use them for non-real-time workloads\nPrompt caching reduces repeated context costs significantly"
      },
      {
        "title": "Coding",
        "body": "Architecture and design decisions: Use frontier models (Opus-class) — they catch subtle issues cheaper models miss\nDay-to-day implementation: Mid-tier models (Sonnet-class) offer 90% of capability at 20% of cost\nParallel subtasks and scaffolding: Fast/cheap models (Haiku-class) — speed matters more than depth\nCode review: Thorough models catch async bugs and edge cases that fast models miss"
      },
      {
        "title": "Non-Coding",
        "body": "Complex reasoning and math: Extended thinking modes justify their cost for hard problems\nGeneral assistance: User preference studies favor models different from benchmark leaders\nHigh-volume simple queries: Cheapest models perform identically — don't overpay\nLong documents: Context window size determines viability — some offer 1M+ tokens"
      },
      {
        "title": "Claude Code vs Codex CLI",
        "body": "Claude Code: Fast iteration, UI/frontend, interactive debugging — developer stays in the loop\nCodex CLI: Long-running background tasks, large refactors, set-and-forget — accuracy over speed\nBoth tools have value — use Claude Code for implementation, Codex for final review\nFile size limits differ — Claude Code struggles with files over 25K tokens"
      },
      {
        "title": "Orchestration Pattern",
        "body": "Planning phase: Use expensive/smart models to break down problems correctly\nExecution phase: Use balanced models, parallelize where possible\nReview phase: Use accurate models for final verification — catches bugs others miss\nThis pattern beats using one model for everything at similar total cost"
      },
      {
        "title": "Benchmark Skepticism",
        "body": "Benchmark scores vary 2-3x based on scaffolding and evaluation method\nUser preference rankings differ significantly from benchmark rankings\nSWE-bench scores don't predict real-world coding quality reliably\nModels drift week-to-week — last month's best may underperform today"
      },
      {
        "title": "Open Source Viability",
        "body": "DeepSeek and similar models approach frontier performance at 1/50th API cost\nSelf-hosting eliminates API rate limits and price variability\nMIT/Apache licensed models allow commercial use without restrictions\nConsider for: data privacy, cost predictability, custom fine-tuning"
      },
      {
        "title": "Model Selection Mistakes",
        "body": "Using premium models for chatbot responses that cheap models handle identically\nIgnoring context window limits — chunking long documents costs more than using large-context models\nExpecting consistency — same prompt gives different results over time as models update\nTrusting speed over accuracy for complex tasks — fast models trade thoroughness for latency"
      },
      {
        "title": "Practical Guidelines",
        "body": "Default to mid-tier for most tasks, escalate to frontier only when quality suffers\nTrack actual costs per workflow, not just per-token rates\nBuild verification into pipelines — don't trust any model blindly\nReassess model choices quarterly — pricing and capabilities shift constantly"
      }
    ],
    "body": "AI Model Selection Rules\nCore Principle\nNo single model is best for everything — match model to task, not brand loyalty\nA $0.75/M model often performs identically to a $40/M model for simple tasks\nTest cheaper alternatives before committing to expensive defaults\nCost Reality\nOutput tokens cost 3-10x more than input tokens — advertised input prices are misleading\nCalculate real cost with your actual input/output ratio, not theoretical pricing\nBatch/async APIs offer 50% discounts — use them for non-real-time workloads\nPrompt caching reduces repeated context costs significantly\nTask Matching\nCoding\nArchitecture and design decisions: Use frontier models (Opus-class) — they catch subtle issues cheaper models miss\nDay-to-day implementation: Mid-tier models (Sonnet-class) offer 90% of capability at 20% of cost\nParallel subtasks and scaffolding: Fast/cheap models (Haiku-class) — speed matters more than depth\nCode review: Thorough models catch async bugs and edge cases that fast models miss\nNon-Coding\nComplex reasoning and math: Extended thinking modes justify their cost for hard problems\nGeneral assistance: User preference studies favor models different from benchmark leaders\nHigh-volume simple queries: Cheapest models perform identically — don't overpay\nLong documents: Context window size determines viability — some offer 1M+ tokens\nClaude Code vs Codex CLI\nClaude Code: Fast iteration, UI/frontend, interactive debugging — developer stays in the loop\nCodex CLI: Long-running background tasks, large refactors, set-and-forget — accuracy over speed\nBoth tools have value — use Claude Code for implementation, Codex for final review\nFile size limits differ — Claude Code struggles with files over 25K tokens\nOrchestration Pattern\nPlanning phase: Use expensive/smart models to break down problems correctly\nExecution phase: Use balanced models, parallelize where possible\nReview phase: Use accurate models for final verification — catches bugs others miss\nThis pattern beats using one model for everything at similar total cost\nBenchmark Skepticism\nBenchmark scores vary 2-3x based on scaffolding and evaluation method\nUser preference rankings differ significantly from benchmark rankings\nSWE-bench scores don't predict real-world coding quality reliably\nModels drift week-to-week — last month's best may underperform today\nOpen Source Viability\nDeepSeek and similar models approach frontier performance at 1/50th API cost\nSelf-hosting eliminates API rate limits and price variability\nMIT/Apache licensed models allow commercial use without restrictions\nConsider for: data privacy, cost predictability, custom fine-tuning\nModel Selection Mistakes\nUsing premium models for chatbot responses that cheap models handle identically\nIgnoring context window limits — chunking long documents costs more than using large-context models\nExpecting consistency — same prompt gives different results over time as models update\nTrusting speed over accuracy for complex tasks — fast models trade thoroughness for latency\nPractical Guidelines\nDefault to mid-tier for most tasks, escalate to frontier only when quality suffers\nTrack actual costs per workflow, not just per-token rates\nBuild verification into pipelines — don't trust any model blindly\nReassess model choices quarterly — pricing and capabilities shift constantly"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/ivangdavila/models",
    "publisherUrl": "https://clawhub.ai/ivangdavila/models",
    "owner": "ivangdavila",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/models",
    "downloadUrl": "https://openagent3.xyz/downloads/models",
    "agentUrl": "https://openagent3.xyz/skills/models/agent",
    "manifestUrl": "https://openagent3.xyz/skills/models/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/models/agent.md"
  }
}