{
  "schemaVersion": "1.0",
  "item": {
    "slug": "openclaw-gpu-bridge",
    "name": "GPU Bridge",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/homeofe/openclaw-gpu-bridge",
    "canonicalUrl": "https://clawhub.ai/homeofe/openclaw-gpu-bridge",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/openclaw-gpu-bridge",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=openclaw-gpu-bridge",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "CHANGELOG.md",
      "README.md",
      "SKILL.md",
      "gpu-service/README.md",
      "gpu-service/__init__.py",
      "gpu-service/device.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=openclaw-gpu-bridge",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=openclaw-gpu-bridge",
        "contentDisposition": "attachment; filename=\"openclaw-gpu-bridge-0.2.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/openclaw-gpu-bridge"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/openclaw-gpu-bridge",
    "agentPageUrl": "https://openagent3.xyz/skills/openclaw-gpu-bridge/agent",
    "manifestUrl": "https://openagent3.xyz/skills/openclaw-gpu-bridge/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/openclaw-gpu-bridge/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "@elvatis_com/openclaw-gpu-bridge",
        "body": "OpenClaw plugin to offload ML tasks (BERTScore + embeddings) to one or many remote GPU hosts."
      },
      {
        "title": "v0.2 Highlights",
        "body": "Multi-GPU host pool (hosts[]) with:\n\nround-robin or least-busy load balancing\nautomatic failover\nperiodic host health checks\n\n\nBackward compatibility with v0.1 (serviceUrl / url)\nFlexible model selection per request (model / model_type)\nGPU service model caching (on-demand loading)\nOptional transfer visibility via /status endpoint + batch progress logs"
      },
      {
        "title": "Tools",
        "body": "gpu_health\ngpu_info\ngpu_status (new in v0.2)\ngpu_bertscore\ngpu_embed"
      },
      {
        "title": "v0.2 (recommended)",
        "body": "{\n  \"plugins\": {\n    \"@elvatis_com/openclaw-gpu-bridge\": {\n      \"hosts\": [\n        {\n          \"name\": \"rtx-2080ti\",\n          \"url\": \"http://your-gpu-host:8765\",\n          \"apiKey\": \"gpu-key-1\"\n        },\n        {\n          \"name\": \"rtx-3090\",\n          \"url\": \"http://your-second-gpu-host:8765\",\n          \"apiKey\": \"gpu-key-2\"\n        }\n      ],\n      \"loadBalancing\": \"least-busy\",\n      \"healthCheckIntervalSeconds\": 30,\n      \"timeout\": 45,\n      \"models\": {\n        \"embed\": \"all-MiniLM-L6-v2\",\n        \"bertscore\": \"microsoft/deberta-xlarge-mnli\"\n      }\n    }\n  }\n}"
      },
      {
        "title": "v0.1 compatibility",
        "body": "{\n  \"plugins\": {\n    \"@elvatis_com/openclaw-gpu-bridge\": {\n      \"serviceUrl\": \"http://your-gpu-host:8765\",\n      \"apiKey\": \"gpu-key\",\n      \"timeout\": 45\n    }\n  }\n}"
      },
      {
        "title": "Config reference",
        "body": "hosts: array of GPU hosts (v0.2)\nserviceUrl / url: legacy single-host config\nloadBalancing: round-robin or least-busy\nhealthCheckIntervalSeconds: host health polling interval\ntimeout: request timeout for compute endpoints\napiKey: fallback API key for hosts that do not define per-host key\nmodels.embed, models.bertscore: plugin-side default models"
      },
      {
        "title": "GPU Service (Python) Setup",
        "body": "cd gpu-service\npip install -r requirements.txt\nuvicorn gpu_service:app --host 0.0.0.0 --port 8765\n\nDefault models are warmed on startup:\n\nEmbed: all-MiniLM-L6-v2\nBERTScore: microsoft/deberta-xlarge-mnli\n\nAdditional models are loaded on-demand and cached in memory."
      },
      {
        "title": "Environment variables",
        "body": "API_KEY: require X-API-Key for all endpoints except /health\nGPU_MAX_CONCURRENT: max parallel jobs (default 2)\nGPU_EMBED_BATCH: embedding chunk size for progress logging (default 32)\nMODEL_BERTSCORE: default warm model for BERTScore\nMODEL_EMBED: default warm model for embeddings\nTORCH_DEVICE: force device (cuda, cpu, cuda:1)"
      },
      {
        "title": "API Endpoints (GPU Service)",
        "body": "GET /health\nGET /info\nGET /status (queue + active jobs + progress)\nPOST /bertscore\nPOST /embed"
      },
      {
        "title": "Request-level model override",
        "body": "/bertscore:\n\n{\n  \"candidates\": [\"a\"],\n  \"references\": [\"b\"],\n  \"model_type\": \"microsoft/deberta-xlarge-mnli\"\n}\n\n/embed:\n\n{\n  \"texts\": [\"hello world\"],\n  \"model\": \"sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2\"\n}"
      },
      {
        "title": "Exposing to the Internet",
        "body": "If you expose your GPU service outside LAN, use defense-in-depth:\n\nPre-shared key auth (required)\n\nSet API_KEY on service\nConfigure same key in plugin host config (apiKey)\nRequests must include X-API-Key\n\n\n\nTLS/HTTPS (required on public internet)\n\nRecommended: nginx reverse proxy with Let’s Encrypt certs\nAlternative: run uvicorn with SSL cert/key directly"
      },
      {
        "title": "nginx reverse proxy example",
        "body": "server {\n  listen 443 ssl http2;\n  server_name gpu.example.com;\n\n  ssl_certificate /etc/letsencrypt/live/gpu.example.com/fullchain.pem;\n  ssl_certificate_key /etc/letsencrypt/live/gpu.example.com/privkey.pem;\n\n  location / {\n    proxy_pass http://127.0.0.1:8765;\n    proxy_set_header Host $host;\n    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    proxy_set_header X-Forwarded-Proto $scheme;\n  }\n}"
      },
      {
        "title": "uvicorn SSL example",
        "body": "uvicorn gpu_service:app --host 0.0.0.0 --port 8765 \\\n  --ssl-keyfile /path/key.pem \\\n  --ssl-certfile /path/cert.pem\n\nOptional: WireGuard VPN instead of public exposure\n\nKeep service private behind VPN\nPrefer private WireGuard IPs in plugin hosts[].url\n\n\n\nOperational hardening\n\nFirewall allowlist only OpenClaw server IP\nRate limiting at reverse proxy\nMonitor logs and rotate keys periodically"
      },
      {
        "title": "Development",
        "body": "npm run build\nnpm test\n\nTypeScript runs in strict mode."
      },
      {
        "title": "License",
        "body": "MIT"
      }
    ],
    "body": "@elvatis_com/openclaw-gpu-bridge\n\nOpenClaw plugin to offload ML tasks (BERTScore + embeddings) to one or many remote GPU hosts.\n\nv0.2 Highlights\nMulti-GPU host pool (hosts[]) with:\nround-robin or least-busy load balancing\nautomatic failover\nperiodic host health checks\nBackward compatibility with v0.1 (serviceUrl / url)\nFlexible model selection per request (model / model_type)\nGPU service model caching (on-demand loading)\nOptional transfer visibility via /status endpoint + batch progress logs\nTools\ngpu_health\ngpu_info\ngpu_status (new in v0.2)\ngpu_bertscore\ngpu_embed\nOpenClaw Plugin Config\nv0.2 (recommended)\n{\n  \"plugins\": {\n    \"@elvatis_com/openclaw-gpu-bridge\": {\n      \"hosts\": [\n        {\n          \"name\": \"rtx-2080ti\",\n          \"url\": \"http://your-gpu-host:8765\",\n          \"apiKey\": \"gpu-key-1\"\n        },\n        {\n          \"name\": \"rtx-3090\",\n          \"url\": \"http://your-second-gpu-host:8765\",\n          \"apiKey\": \"gpu-key-2\"\n        }\n      ],\n      \"loadBalancing\": \"least-busy\",\n      \"healthCheckIntervalSeconds\": 30,\n      \"timeout\": 45,\n      \"models\": {\n        \"embed\": \"all-MiniLM-L6-v2\",\n        \"bertscore\": \"microsoft/deberta-xlarge-mnli\"\n      }\n    }\n  }\n}\n\nv0.1 compatibility\n{\n  \"plugins\": {\n    \"@elvatis_com/openclaw-gpu-bridge\": {\n      \"serviceUrl\": \"http://your-gpu-host:8765\",\n      \"apiKey\": \"gpu-key\",\n      \"timeout\": 45\n    }\n  }\n}\n\nConfig reference\nhosts: array of GPU hosts (v0.2)\nserviceUrl / url: legacy single-host config\nloadBalancing: round-robin or least-busy\nhealthCheckIntervalSeconds: host health polling interval\ntimeout: request timeout for compute endpoints\napiKey: fallback API key for hosts that do not define per-host key\nmodels.embed, models.bertscore: plugin-side default models\nGPU Service (Python) Setup\ncd gpu-service\npip install -r requirements.txt\nuvicorn gpu_service:app --host 
0.0.0.0 --port 8765\n\n\nDefault models are warmed on startup:\n\nEmbed: all-MiniLM-L6-v2\nBERTScore: microsoft/deberta-xlarge-mnli\n\nAdditional models are loaded on-demand and cached in memory.\n\nEnvironment variables\nAPI_KEY: require X-API-Key for all endpoints except /health\nGPU_MAX_CONCURRENT: max parallel jobs (default 2)\nGPU_EMBED_BATCH: embedding chunk size for progress logging (default 32)\nMODEL_BERTSCORE: default warm model for BERTScore\nMODEL_EMBED: default warm model for embeddings\nTORCH_DEVICE: force device (cuda, cpu, cuda:1)\nAPI Endpoints (GPU Service)\nGET /health\nGET /info\nGET /status (queue + active jobs + progress)\nPOST /bertscore\nPOST /embed\nRequest-level model override\n\n/bertscore:\n\n{\n  \"candidates\": [\"a\"],\n  \"references\": [\"b\"],\n  \"model_type\": \"microsoft/deberta-xlarge-mnli\"\n}\n\n\n/embed:\n\n{\n  \"texts\": [\"hello world\"],\n  \"model\": \"sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2\"\n}\n\nExposing to the Internet\n\nIf you expose your GPU service outside LAN, use defense-in-depth:\n\nPre-shared key auth (required)\n\nSet API_KEY on service\nConfigure same key in plugin host config (apiKey)\nRequests must include X-API-Key\n\nTLS/HTTPS (required on public internet)\n\nRecommended: nginx reverse proxy with Let’s Encrypt certs\nAlternative: run uvicorn with SSL cert/key directly\nnginx reverse proxy example\nserver {\n  listen 443 ssl http2;\n  server_name gpu.example.com;\n\n  ssl_certificate /etc/letsencrypt/live/gpu.example.com/fullchain.pem;\n  ssl_certificate_key /etc/letsencrypt/live/gpu.example.com/privkey.pem;\n\n  location / {\n    proxy_pass http://127.0.0.1:8765;\n    proxy_set_header Host $host;\n    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    proxy_set_header X-Forwarded-Proto $scheme;\n  }\n}\n\nuvicorn SSL example\nuvicorn gpu_service:app --host 0.0.0.0 --port 8765 \\\n  --ssl-keyfile /path/key.pem \\\n  --ssl-certfile /path/cert.pem\n\n\nOptional: 
WireGuard VPN instead of public exposure\n\nKeep service private behind VPN\nPrefer private WireGuard IPs in plugin hosts[].url\n\nOperational hardening\n\nFirewall allowlist only OpenClaw server IP\nRate limiting at reverse proxy\nMonitor logs and rotate keys periodically\nDevelopment\nnpm run build\nnpm test\n\n\nTypeScript runs in strict mode.\n\nLicense\n\nMIT"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/homeofe/openclaw-gpu-bridge",
    "publisherUrl": "https://clawhub.ai/homeofe/openclaw-gpu-bridge",
    "owner": "homeofe",
    "version": "0.2.1",
    "license": "MIT",
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/openclaw-gpu-bridge",
    "downloadUrl": "https://openagent3.xyz/downloads/openclaw-gpu-bridge",
    "agentUrl": "https://openagent3.xyz/skills/openclaw-gpu-bridge/agent",
    "manifestUrl": "https://openagent3.xyz/skills/openclaw-gpu-bridge/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/openclaw-gpu-bridge/agent.md"
  }
}