{
  "schemaVersion": "1.0",
  "item": {
    "slug": "external-ai-integration",
    "name": "External AI Integration",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/konscious0beast/external-ai-integration",
    "canonicalUrl": "https://clawhub.ai/konscious0beast/external-ai-integration",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/external-ai-integration",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=external-ai-integration",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "external_ai_integration.py",
      "test_external_ai.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=external-ai-integration",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=external-ai-integration",
        "contentDisposition": "attachment; filename=\"external-ai-integration-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/external-ai-integration"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/external-ai-integration",
    "agentPageUrl": "https://openagent3.xyz/skills/external-ai-integration/agent",
    "manifestUrl": "https://openagent3.xyz/skills/external-ai-integration/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/external-ai-integration/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "External AI Integration Skill",
        "body": "This skill provides patterns for using external AI models as tools that the assistant can call on‑demand. It extends existing browser‑automation and API‑integration skills, enabling the assistant to:\n\nAutomate interactions with ChatGPT, Claude, Gemini, or other web‑based LLMs via Chrome Relay (browser automation).\nCall Hugging Face Inference API for models hosted on Hugging Face Spaces (text‑generation, summarization, translation, etc.).\nIntegrate external reasoning into the assistant's own workflow—e.g., asking ChatGPT for a second opinion, using Claude for detailed analysis, or leveraging Hugging Face for domain‑specific tasks.\nAvoid spawning isolated sub‑agents by treating external models as tools, keeping control and context within the main assistant session."
      },
      {
        "title": "When to use",
        "body": "You need additional reasoning power, a different model's perspective, or a specialized model (e.g., code generation, translation) that your primary model lacks.\nThe task benefits from a second opinion or parallel evaluation (e.g., reviewing code, analyzing strategy).\nYou want to use a model with a larger context window, better coding ability, or specific domain knowledge (Claude, ChatGPT, Hugging Face models).\nYou are asked to “integrate external AI via browser” or “use ChatGPT/Claude as a tool”.\nYou need to call Hugging Face Inference API for a specific model (e.g., summarization, sentiment analysis) and incorporate the result into your response."
      },
      {
        "title": "1. Browser Automation (Chrome Relay) for Web‑Based LLMs",
        "body": "Use Chrome Relay to automate interactions with ChatGPT, Claude, Gemini, or any other web‑based LLM that requires a browser interface.\n\nPrerequisites:\n\nChrome Relay extension installed and a tab attached (user must click the OpenClaw Browser Relay toolbar icon).\nThe target LLM website (e.g., chatgpt.com, claude.ai) already logged in (session cookies present).\nBasic familiarity with the browser automation playbook (memory/patterns/playbooks.md – “Browser Automation (Chrome Relay)”).\n\nSteps:\n\nAttach to the Chrome Relay profile (profile=\"chrome\").\nNavigate to the target LLM (or reuse an already‑open tab).\nTake a snapshot to locate the input field and send button (use refs=\"aria\" for stable references).\nType the prompt into the input field and submit (click send button or press Enter).\nWait for the response (poll for a new element, detect typing indicators, or use a fixed timeout).\nExtract the response text from the appropriate DOM element.\nReturn the response to the assistant's workflow.\n\nExample workflow:\n\n# This is a conceptual example; actual implementation uses browser tool calls.\ndef ask_chatgpt(prompt):\n    # 1. Ensure Chrome Relay is attached\n    browser(action=\"open\", profile=\"chrome\", targetUrl=\"https://chatgpt.com\")\n    # 2. Snapshot to get references\n    snap = browser(action=\"snapshot\", refs=\"aria\")\n    # 3. Find input field (aria role=\"textbox\") and send button\n    input_ref = snap.find_element(role=\"textbox\", name=\"Message\")\n    send_ref = snap.find_element(role=\"button\", name=\"Send\")\n    # 4. Type prompt and click send\n    browser(action=\"act\", request={\"kind\":\"type\", \"ref\":input_ref, \"text\":prompt})\n    browser(action=\"act\", request={\"kind\":\"click\", \"ref\":send_ref})\n    # 5. Wait for response (simplified)\n    time.sleep(10)\n    # 6. 
Snapshot again, extract response from last message bubble\n    snap2 = browser(action=\"snapshot\", refs=\"aria\")\n    response_element = snap2.find_last_message()\n    return response_element.text\n\nKey considerations:\n\nSession persistence: The attached tab must stay logged in; avoid actions that log out.\nRate limits: Be aware of the LLM's rate limits and usage policies.\nError handling: Detect captchas, “network error” messages, or “try again” buttons and fall back gracefully.\nMulti‑turn conversations: Maintain conversation context by keeping the same tab and not refreshing."
      },
      {
        "title": "2. Hugging Face Inference API Integration",
        "body": "For models hosted on Hugging Face Spaces or the Inference API, you can call them directly via HTTP requests.\n\nPrerequisites:\n\nHugging Face API token (stored in 1Password or environment variable).\nModel identifier (e.g., \"gpt2\", \"google/flan-t5-large\", \"microsoft/DialoGPT-medium\").\nKnowledge of the model's expected input/output format.\n\nSteps:\n\nRetrieve the API token (use 1Password skill or read from ~/.huggingface/token).\nConstruct the request (URL, headers, JSON payload).\nSend the request via curl or exec with requests Python module.\nParse the response and extract the generated text.\nHandle errors (rate limits, model loading, invalid token).\n\nExample script (using curl):\n\n#!/bin/bash\nset -e\n\nMODEL=\"google/flan-t5-large\"\nPROMPT=\"Translate English to German: How are you?\"\nAPI_TOKEN=$(op read \"op://Personal/HuggingFace/api_token\")\n\ncurl -s \"https://api-inference.huggingface.co/models/$MODEL\" \\\n  -H \"Authorization: Bearer $API_TOKEN\" \\\n  -H \"Content-Type: application/json\" \\\n  -d \"{\\\"inputs\\\": \\\"$PROMPT\\\"}\" | jq -r '.[0].generated_text'\n\nExample Python function (using requests):\n\nimport requests\nimport os\n\ndef hf_inference(model, inputs, parameters=None):\n    api_token = os.getenv(\"HF_TOKEN\")  # or retrieve via 1Password\n    url = f\"https://api-inference.huggingface.co/models/{model}\"\n    headers = {\"Authorization\": f\"Bearer {api_token}\"}\n    payload = {\"inputs\": inputs}\n    if parameters:\n        payload.update(parameters)\n    resp = requests.post(url, headers=headers, json=payload)\n    resp.raise_for_status()\n    return resp.json()\n\nKey considerations:\n\nCost: Inference API may have costs; monitor usage.\nModel readiness: Some models need to be loaded; include {\"options\":{\"wait_for_model\":true}} in parameters.\nOutput format: Response structure varies by model; inspect with a test call first."
      },
      {
        "title": "3. Orchestrating External AI as a Tool",
        "body": "Instead of spawning a sub‑agent, the assistant calls external AI within its own reasoning flow.\n\nPattern:\n\nDetermine need: Decide which external model is appropriate (ChatGPT for creative tasks, Claude for analysis, Hugging Face for specialized models).\nPrepare the prompt: Format the prompt with clear instructions, context, and expected output format.\nCall the tool: Use browser automation for web‑based LLMs or API call for Hugging Face.\nIntegrate the result: Parse, validate, and incorporate the external response into your own answer.\nFallback: If the external call fails, continue with your own reasoning or try an alternative.\n\nExample decision logic:\n\ndef external_ai_assist(task_type, prompt):\n    if task_type == \"code_review\":\n        # Use Claude via browser automation\n        return ask_claude(prompt)\n    elif task_type == \"translation\":\n        # Use Hugging Face translation model\n        return hf_inference(\"Helsinki-NLP/opus-mt-en-de\", prompt)\n    elif task_type == \"creative_writing\":\n        # Use ChatGPT via browser automation\n        return ask_chatgpt(prompt)\n    else:\n        raise ValueError(f\"No external AI configured for {task_type}\")"
      },
      {
        "title": "4. Prompt Engineering for External Models",
        "body": "External models may require different prompting styles than the assistant's native model.\n\nChatGPT/Claude: Use conversational style, system prompts, and markdown formatting.\nHugging Face models: Follow the model's expected input format (e.g., \"Translate English to German: ...\" for T5).\nInclude context: Provide necessary background, constraints, and examples in the prompt.\nSpecify output format: Ask for JSON, bullet points, code blocks, etc.\n\nExample prompt for code review:\n\nYou are an expert software engineer reviewing the following code snippet. Please:\n1. Identify potential bugs or security issues.\n2. Suggest performance improvements.\n3. Comment on code style and readability.\n4. Output your review as a JSON with keys \"bugs\", \"performance\", \"style\".\n\nCode:\n```python\ndef calculate_average(numbers):\n    total = 0\n    for n in numbers:\n        total += n\n    return total / len(numbers)\n\n### 5. Error Handling and Fallbacks\n\nExternal services can fail; plan for graceful degradation.\n\n- **Browser automation failures:** Captchas, login required, network errors. Fallback: try Hugging Face API or continue without external help.\n- **API failures:** Rate limits, model not found, token invalid. Fallback: use a different model or skip external step.\n- **Timeouts:** Set reasonable timeouts (e.g., 30 seconds for browser automation, 10 seconds for API). 
Fallback: proceed with assistant's own reasoning.\n- **Log failures:** Record external AI failures in `memory/YYYY‑MM‑DD.md` with tag `external‑ai‑failure` for later analysis.\n\n**Example fallback structure:**\n\n```python\ntry:\n    response = ask_chatgpt(prompt)\nexcept (BrowserError, TimeoutError) as e:\n    log_failure(\"ChatGPT\", e)\n    # Fallback to Hugging Face\n    response = hf_inference(\"google/flan-t5-xxl\", prompt)\nexcept Exception as e:\n    log_failure(\"All external AI\", e)\n    response = None\n\nif response:\n    integrate(response)\nelse:\n    # Continue with assistant's own reasoning\n    pass"
      },
      {
        "title": "Example 1: Code Review with Claude",
        "body": "Scenario: The assistant is asked to review a complex React component. It uses Claude (via Chrome Relay) for a detailed second opinion.\n\nSteps:\n\nAssistant prepares a prompt with the component code and review instructions.\nCalls ask_claude(prompt) using browser automation.\nClaude returns a structured review.\nAssistant incorporates Claude's feedback into its final answer."
      },
      {
        "title": "Example 2: Translation via Hugging Face",
        "body": "Scenario: User provides a paragraph in English and asks for a German translation. Assistant calls Hugging Face translation model.\n\nSteps:\n\nAssistant constructs prompt: \"Translate English to German: <text>\".\nCalls hf_inference(\"Helsinki-NLP/opus-mt-en-de\", prompt).\nParses the generated text.\nReturns translation to user."
      },
      {
        "title": "Example 3: Creative Brainstorming with ChatGPT",
        "body": "Scenario: User needs ideas for a blog post title. Assistant uses ChatGPT to generate 10 options.\n\nSteps:\n\nAssistant navigates to ChatGPT tab, inputs “Generate 10 catchy blog post titles about AI assistants”.\nWaits for response, extracts list.\nPresents the list to user, adding its own commentary."
      },
      {
        "title": "Example 4: Combined Analysis (Assistant + External)",
        "body": "Scenario: User asks for a strategic analysis of a business decision. Assistant uses its own reasoning, then asks ChatGPT for potential blind spots.\n\nSteps:\n\nAssistant produces its own analysis.\nAssistant prompts ChatGPT: “What are potential blind spots in the following analysis? <analysis>”\nIntegrates ChatGPT's blind‑spot list into final answer."
      },
      {
        "title": "Anti‑Patterns",
        "body": "Over‑reliance on external AI: Using external models for trivial tasks increases latency and dependency. Use only when value added justifies the cost/risk.\nIgnoring context size: Web‑based LLMs have context limits; sending huge contexts may truncate or fail. Summarize or chunk appropriately.\nExposing secrets: Never paste API tokens, passwords, or sensitive data into external AI prompts (especially web‑based). Use 1Password for tokens.\nAssuming correctness: External AI can be wrong, biased, or hallucinate. Always validate critical outputs.\nBreaking conversation flow: Browser automation that logs out or loses the tab breaks future calls. Keep session alive and avoid destructive actions.\nCost unawareness: Hugging Face Inference API may incur costs; monitor usage and set budgets.\nNeglecting fallbacks: Not planning for external AI failure leaves the assistant stuck. Always have a fallback path."
      },
      {
        "title": "Related Patterns",
        "body": "Browser Automation (Chrome Relay) playbook – detailed steps for Chrome Relay automation.\nHugging Face skill – using Hugging Face Hub, Spaces, and Inference API with budget management.\n1Password skill – retrieving API tokens securely.\nAPI‑Tool Integration skill – general patterns for calling external APIs.\nError Recovery Automation skill – handling failures in external services.\nHealth Monitoring skill – monitoring external service availability."
      },
      {
        "title": "References",
        "body": "docs/browser-automation.md – Chrome Relay setup and commands.\nskills/huggingface/SKILL.md – Hugging Face API usage.\nskills/1password/SKILL.md – retrieving secrets.\nmemory/patterns/playbooks.md – Browser Automation playbook.\nscripts/external_ai_integration.py (this skill's core implementation).\nplaybooks/external-ai-integration-playbook.md (orchestration playbook)."
      },
      {
        "title": "Skill Integration",
        "body": "When a task would benefit from external AI reasoning, read this skill to decide which model to use and how to call it. Store successful patterns in memory/patterns/tools.md. Update pending.md if external AI fails repeatedly and needs manual configuration.\n\nThis skill increases autonomy by expanding the assistant's toolset with external AI models, allowing it to tackle a wider range of tasks without spawning sub‑agents and maintaining control over the workflow."
      }
    ],
    "body": "External AI Integration Skill\n\nThis skill provides patterns for using external AI models as tools that the assistant can call on‑demand. It extends existing browser‑automation and API‑integration skills, enabling the assistant to:\n\nAutomate interactions with ChatGPT, Claude, Gemini, or other web‑based LLMs via Chrome Relay (browser automation).\nCall Hugging Face Inference API for models hosted on Hugging Face Spaces (text‑generation, summarization, translation, etc.).\nIntegrate external reasoning into the assistant's own workflow—e.g., asking ChatGPT for a second opinion, using Claude for detailed analysis, or leveraging Hugging Face for domain‑specific tasks.\nAvoid spawning isolated sub‑agents by treating external models as tools, keeping control and context within the main assistant session.\nWhen to use\nYou need additional reasoning power, a different model's perspective, or a specialized model (e.g., code generation, translation) that your primary model lacks.\nThe task benefits from a second opinion or parallel evaluation (e.g., reviewing code, analyzing strategy).\nYou want to use a model with a larger context window, better coding ability, or specific domain knowledge (Claude, ChatGPT, Hugging Face models).\nYou are asked to “integrate external AI via browser” or “use ChatGPT/Claude as a tool”.\nYou need to call Hugging Face Inference API for a specific model (e.g., summarization, sentiment analysis) and incorporate the result into your response.\nCore patterns\n1. 
Browser Automation (Chrome Relay) for Web‑Based LLMs\n\nUse Chrome Relay to automate interactions with ChatGPT, Claude, Gemini, or any other web‑based LLM that requires a browser interface.\n\nPrerequisites:\n\nChrome Relay extension installed and a tab attached (user must click the OpenClaw Browser Relay toolbar icon).\nThe target LLM website (e.g., chatgpt.com, claude.ai) already logged in (session cookies present).\nBasic familiarity with the browser automation playbook (memory/patterns/playbooks.md – “Browser Automation (Chrome Relay)”).\n\nSteps:\n\nAttach to the Chrome Relay profile (profile=\"chrome\").\nNavigate to the target LLM (or reuse an already‑open tab).\nTake a snapshot to locate the input field and send button (use refs=\"aria\" for stable references).\nType the prompt into the input field and submit (click send button or press Enter).\nWait for the response (poll for a new element, detect typing indicators, or use a fixed timeout).\nExtract the response text from the appropriate DOM element.\nReturn the response to the assistant's workflow.\n\nExample workflow:\n\n# This is a conceptual example; actual implementation uses browser tool calls.\ndef ask_chatgpt(prompt):\n    # 1. Ensure Chrome Relay is attached\n    browser(action=\"open\", profile=\"chrome\", targetUrl=\"https://chatgpt.com\")\n    # 2. Snapshot to get references\n    snap = browser(action=\"snapshot\", refs=\"aria\")\n    # 3. Find input field (aria role=\"textbox\") and send button\n    input_ref = snap.find_element(role=\"textbox\", name=\"Message\")\n    send_ref = snap.find_element(role=\"button\", name=\"Send\")\n    # 4. Type prompt and click send\n    browser(action=\"act\", request={\"kind\":\"type\", \"ref\":input_ref, \"text\":prompt})\n    browser(action=\"act\", request={\"kind\":\"click\", \"ref\":send_ref})\n    # 5. Wait for response (simplified)\n    time.sleep(10)\n    # 6. 
Snapshot again, extract response from last message bubble\n    snap2 = browser(action=\"snapshot\", refs=\"aria\")\n    response_element = snap2.find_last_message()\n    return response_element.text\n\n\nKey considerations:\n\nSession persistence: The attached tab must stay logged in; avoid actions that log out.\nRate limits: Be aware of the LLM's rate limits and usage policies.\nError handling: Detect captchas, “network error” messages, or “try again” buttons and fall back gracefully.\nMulti‑turn conversations: Maintain conversation context by keeping the same tab and not refreshing.\n2. Hugging Face Inference API Integration\n\nFor models hosted on Hugging Face Spaces or the Inference API, you can call them directly via HTTP requests.\n\nPrerequisites:\n\nHugging Face API token (stored in 1Password or environment variable).\nModel identifier (e.g., \"gpt2\", \"google/flan-t5-large\", \"microsoft/DialoGPT-medium\").\nKnowledge of the model's expected input/output format.\n\nSteps:\n\nRetrieve the API token (use 1Password skill or read from ~/.huggingface/token).\nConstruct the request (URL, headers, JSON payload).\nSend the request via curl or exec with requests Python module.\nParse the response and extract the generated text.\nHandle errors (rate limits, model loading, invalid token).\n\nExample script (using curl):\n\n#!/bin/bash\nset -e\n\nMODEL=\"google/flan-t5-large\"\nPROMPT=\"Translate English to German: How are you?\"\nAPI_TOKEN=$(op read \"op://Personal/HuggingFace/api_token\")\n\ncurl -s \"https://api-inference.huggingface.co/models/$MODEL\" \\\n  -H \"Authorization: Bearer $API_TOKEN\" \\\n  -H \"Content-Type: application/json\" \\\n  -d \"{\\\"inputs\\\": \\\"$PROMPT\\\"}\" | jq -r '.[0].generated_text'\n\n\nExample Python function (using requests):\n\nimport requests\nimport os\n\ndef hf_inference(model, inputs, parameters=None):\n    api_token = os.getenv(\"HF_TOKEN\")  # or retrieve via 1Password\n    url = 
f\"https://api-inference.huggingface.co/models/{model}\"\n    headers = {\"Authorization\": f\"Bearer {api_token}\"}\n    payload = {\"inputs\": inputs}\n    if parameters:\n        payload.update(parameters)\n    resp = requests.post(url, headers=headers, json=payload)\n    resp.raise_for_status()\n    return resp.json()\n\n\nKey considerations:\n\nCost: Inference API may have costs; monitor usage.\nModel readiness: Some models need to be loaded; include {\"options\":{\"wait_for_model\":true}} in parameters.\nOutput format: Response structure varies by model; inspect with a test call first.\n3. Orchestrating External AI as a Tool\n\nInstead of spawning a sub‑agent, the assistant calls external AI within its own reasoning flow.\n\nPattern:\n\nDetermine need: Decide which external model is appropriate (ChatGPT for creative tasks, Claude for analysis, Hugging Face for specialized models).\nPrepare the prompt: Format the prompt with clear instructions, context, and expected output format.\nCall the tool: Use browser automation for web‑based LLMs or API call for Hugging Face.\nIntegrate the result: Parse, validate, and incorporate the external response into your own answer.\nFallback: If the external call fails, continue with your own reasoning or try an alternative.\n\nExample decision logic:\n\ndef external_ai_assist(task_type, prompt):\n    if task_type == \"code_review\":\n        # Use Claude via browser automation\n        return ask_claude(prompt)\n    elif task_type == \"translation\":\n        # Use Hugging Face translation model\n        return hf_inference(\"Helsinki-NLP/opus-mt-en-de\", prompt)\n    elif task_type == \"creative_writing\":\n        # Use ChatGPT via browser automation\n        return ask_chatgpt(prompt)\n    else:\n        raise ValueError(f\"No external AI configured for {task_type}\")\n\n4. 
Prompt Engineering for External Models\n\nExternal models may require different prompting styles than the assistant's native model.\n\nChatGPT/Claude: Use conversational style, system prompts, and markdown formatting.\nHugging Face models: Follow the model's expected input format (e.g., \"Translate English to German: ...\" for T5).\nInclude context: Provide necessary background, constraints, and examples in the prompt.\nSpecify output format: Ask for JSON, bullet points, code blocks, etc.\n\nExample prompt for code review:\n\nYou are an expert software engineer reviewing the following code snippet. Please:\n1. Identify potential bugs or security issues.\n2. Suggest performance improvements.\n3. Comment on code style and readability.\n4. Output your review as a JSON with keys \"bugs\", \"performance\", \"style\".\n\nCode:\n```python\ndef calculate_average(numbers):\n    total = 0\n    for n in numbers:\n        total += n\n    return total / len(numbers)\n\n\n### 5. Error Handling and Fallbacks\n\nExternal services can fail; plan for graceful degradation.\n\n- **Browser automation failures:** Captchas, login required, network errors. Fallback: try Hugging Face API or continue without external help.\n- **API failures:** Rate limits, model not found, token invalid. Fallback: use a different model or skip external step.\n- **Timeouts:** Set reasonable timeouts (e.g., 30 seconds for browser automation, 10 seconds for API). 
Fallback: proceed with assistant's own reasoning.\n- **Log failures:** Record external AI failures in `memory/YYYY‑MM‑DD.md` with tag `external‑ai‑failure` for later analysis.\n\n**Example fallback structure:**\n\n```python\ntry:\n    response = ask_chatgpt(prompt)\nexcept (BrowserError, TimeoutError) as e:\n    log_failure(\"ChatGPT\", e)\n    # Fallback to Hugging Face\n    response = hf_inference(\"google/flan-t5-xxl\", prompt)\nexcept Exception as e:\n    log_failure(\"All external AI\", e)\n    response = None\n\nif response:\n    integrate(response)\nelse:\n    # Continue with assistant's own reasoning\n    pass\n\nExamples\nExample 1: Code Review with Claude\n\nScenario: The assistant is asked to review a complex React component. It uses Claude (via Chrome Relay) for a detailed second opinion.\n\nSteps:\n\nAssistant prepares a prompt with the component code and review instructions.\nCalls ask_claude(prompt) using browser automation.\nClaude returns a structured review.\nAssistant incorporates Claude's feedback into its final answer.\nExample 2: Translation via Hugging Face\n\nScenario: User provides a paragraph in English and asks for a German translation. Assistant calls Hugging Face translation model.\n\nSteps:\n\nAssistant constructs prompt: \"Translate English to German: <text>\".\nCalls hf_inference(\"Helsinki-NLP/opus-mt-en-de\", prompt).\nParses the generated text.\nReturns translation to user.\nExample 3: Creative Brainstorming with ChatGPT\n\nScenario: User needs ideas for a blog post title. Assistant uses ChatGPT to generate 10 options.\n\nSteps:\n\nAssistant navigates to ChatGPT tab, inputs “Generate 10 catchy blog post titles about AI assistants”.\nWaits for response, extracts list.\nPresents the list to user, adding its own commentary.\nExample 4: Combined Analysis (Assistant + External)\n\nScenario: User asks for a strategic analysis of a business decision. 
Assistant uses its own reasoning, then asks ChatGPT for potential blind spots.\n\nSteps:\n\nAssistant produces its own analysis.\nAssistant prompts ChatGPT: “What are potential blind spots in the following analysis? <analysis>”\nIntegrates ChatGPT's blind‑spot list into final answer.\nAnti‑Patterns\nOver‑reliance on external AI: Using external models for trivial tasks increases latency and dependency. Use only when value added justifies the cost/risk.\nIgnoring context size: Web‑based LLMs have context limits; sending huge contexts may truncate or fail. Summarize or chunk appropriately.\nExposing secrets: Never paste API tokens, passwords, or sensitive data into external AI prompts (especially web‑based). Use 1Password for tokens.\nAssuming correctness: External AI can be wrong, biased, or hallucinate. Always validate critical outputs.\nBreaking conversation flow: Browser automation that logs out or loses the tab breaks future calls. Keep session alive and avoid destructive actions.\nCost unawareness: Hugging Face Inference API may incur costs; monitor usage and set budgets.\nNeglecting fallbacks: Not planning for external AI failure leaves the assistant stuck. 
Always have a fallback path.\nRelated Patterns\nBrowser Automation (Chrome Relay) playbook – detailed steps for Chrome Relay automation.\nHugging Face skill – using Hugging Face Hub, Spaces, and Inference API with budget management.\n1Password skill – retrieving API tokens securely.\nAPI‑Tool Integration skill – general patterns for calling external APIs.\nError Recovery Automation skill – handling failures in external services.\nHealth Monitoring skill – monitoring external service availability.\nReferences\ndocs/browser-automation.md – Chrome Relay setup and commands.\nskills/huggingface/SKILL.md – Hugging Face API usage.\nskills/1password/SKILL.md – retrieving secrets.\nmemory/patterns/playbooks.md – Browser Automation playbook.\nscripts/external_ai_integration.py (this skill's core implementation).\nplaybooks/external-ai-integration-playbook.md (orchestration playbook).\nSkill Integration\n\nWhen a task would benefit from external AI reasoning, read this skill to decide which model to use and how to call it. Store successful patterns in memory/patterns/tools.md. Update pending.md if external AI fails repeatedly and needs manual configuration.\n\nThis skill increases autonomy by expanding the assistant's toolset with external AI models, allowing it to tackle a wider range of tasks without spawning sub‑agents and maintaining control over the workflow."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/konscious0beast/external-ai-integration",
    "publisherUrl": "https://clawhub.ai/konscious0beast/external-ai-integration",
    "owner": "konscious0beast",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/external-ai-integration",
    "downloadUrl": "https://openagent3.xyz/downloads/external-ai-integration",
    "agentUrl": "https://openagent3.xyz/skills/external-ai-integration/agent",
    "manifestUrl": "https://openagent3.xyz/skills/external-ai-integration/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/external-ai-integration/agent.md"
  }
}