{
  "schemaVersion": "1.0",
  "item": {
    "slug": "ml-model-eval-benchmark",
    "name": "Ml Model Eval Benchmark",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/0x-Professor/ml-model-eval-benchmark",
    "canonicalUrl": "https://clawhub.ai/0x-Professor/ml-model-eval-benchmark",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/ml-model-eval-benchmark",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ml-model-eval-benchmark",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "agents/openai.yaml",
      "references/benchmarking-guide.md",
      "scripts/benchmark_models.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/ml-model-eval-benchmark"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/ml-model-eval-benchmark",
    "agentPageUrl": "https://openagent3.xyz/skills/ml-model-eval-benchmark/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ml-model-eval-benchmark/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ml-model-eval-benchmark/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Overview",
        "body": "Produce consistent model ranking outputs from metric-weighted evaluation inputs."
      },
      {
        "title": "Workflow",
        "body": "Define metric weights and accepted metric ranges.\nIngest model metrics for each candidate.\nCompute weighted score and ranking.\nExport leaderboard and promotion recommendation."
      },
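      {
        "title": "Scoring Sketch",
        "body": "A minimal sketch of the weighted scoring and ranking step described in the Workflow section, assuming every metric is pre-normalized to a shared 0-1 scale where higher is better. The metric names, weights, and the weighted_rank helper are illustrative assumptions, not the implementation in scripts/benchmark_models.py.\n\n```python\nfrom typing import Dict, List, Tuple\n\n\ndef weighted_rank(\n    candidates: Dict[str, Dict[str, float]],\n    weights: Dict[str, float],\n) -> List[Tuple[str, float]]:\n    \"\"\"Rank models by the weighted mean of normalized metrics (illustrative).\"\"\"\n    total = sum(weights.values())\n    scores = {\n        model: sum(weights[name] * metrics[name] for name in weights) / total\n        for model, metrics in candidates.items()\n    }\n    # Highest weighted score first; Python's sort is stable, so tied models\n    # keep input order -- see references/benchmarking-guide.md for tie-breaks.\n    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)\n\n\n# Hypothetical weights and candidate metrics; names and values are assumptions.\nweights = {\"accuracy\": 0.5, \"f1\": 0.3, \"latency_score\": 0.2}\ncandidates = {\n    \"model-a\": {\"accuracy\": 0.91, \"f1\": 0.88, \"latency_score\": 0.70},\n    \"model-b\": {\"accuracy\": 0.89, \"f1\": 0.90, \"latency_score\": 0.95},\n}\nprint(weighted_rank(candidates, weights))\n```"
      },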
      {
        "title": "Use Bundled Resources",
        "body": "Run scripts/benchmark_models.py to generate benchmark outputs.\nRead references/benchmarking-guide.md for weighting and tie-break guidance."
      },
      {
        "title": "Guardrails",
        "body": "Keep metric names and scales consistent across candidates.\nRecord weighting assumptions in the output."
      },
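      {
        "title": "Metric Consistency Check",
        "body": "A minimal sketch of the metric-consistency guardrail and the accepted-range step from the workflow, assuming candidate metrics arrive as name-to-value mappings. check_metrics and accepted_ranges are hypothetical names, not part of the bundled script.\n\n```python\nfrom typing import Dict, Set, Tuple\n\n\ndef check_metrics(\n    candidates: Dict[str, Dict[str, float]],\n    accepted_ranges: Dict[str, Tuple[float, float]],\n) -> None:\n    \"\"\"Fail fast when candidates disagree on metric names or leave a range.\"\"\"\n    expected: Set[str] = set(accepted_ranges)\n    for model, metrics in candidates.items():\n        # Every candidate must report exactly the expected metric names.\n        if set(metrics) != expected:\n            raise ValueError(\n                f\"{model} reports {sorted(metrics)}, expected {sorted(expected)}\"\n            )\n        # Every value must fall inside its accepted range.\n        for name, value in metrics.items():\n            lo, hi = accepted_ranges[name]\n            if not lo <= value <= hi:\n                raise ValueError(f\"{model}.{name}={value} outside [{lo}, {hi}]\")\n```"
      }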
    ],
    "body": "ML Model Eval Benchmark\nOverview\n\nProduce consistent model ranking outputs from metric-weighted evaluation inputs.\n\nWorkflow\nDefine metric weights and accepted metric ranges.\nIngest model metrics for each candidate.\nCompute weighted score and ranking.\nExport leaderboard and promotion recommendation.\nUse Bundled Resources\nRun scripts/benchmark_models.py to generate benchmark outputs.\nRead references/benchmarking-guide.md for weighting and tie-break guidance.\nGuardrails\nKeep metric names and scales consistent across candidates.\nRecord weighting assumptions in the output."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/0x-Professor/ml-model-eval-benchmark",
    "publisherUrl": "https://clawhub.ai/0x-Professor/ml-model-eval-benchmark",
    "owner": "0x-Professor",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/ml-model-eval-benchmark",
    "downloadUrl": "https://openagent3.xyz/downloads/ml-model-eval-benchmark",
    "agentUrl": "https://openagent3.xyz/skills/ml-model-eval-benchmark/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ml-model-eval-benchmark/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ml-model-eval-benchmark/agent.md"
  }
}