{
  "schemaVersion": "1.0",
  "item": {
    "slug": "ai-sdk-core",
    "name": "AI SDK Core",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/Veeramanikandanr48/ai-sdk-core",
    "canonicalUrl": "https://clawhub.ai/Veeramanikandanr48/ai-sdk-core",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/ai-sdk-core",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ai-sdk-core",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      ".claude-plugin/plugin.json",
      "README.md",
      "SKILL.md",
      "VERIFICATION_REPORT.md",
      "references/links-to-official-docs.md",
      "references/production-patterns.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ai-sdk-core",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=ai-sdk-core",
        "contentDisposition": "attachment; filename=\"ai-sdk-core-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/ai-sdk-core"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/ai-sdk-core",
    "agentPageUrl": "https://openagent3.xyz/skills/ai-sdk-core/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ai-sdk-core/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ai-sdk-core/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "AI SDK Core",
        "body": "Backend AI with Vercel AI SDK v5 and v6.\n\nInstallation:\n\nnpm install ai @ai-sdk/openai @ai-sdk/anthropic @ai-sdk/google zod"
      },
      {
        "title": "AI SDK 6 (Stable - January 2026)",
        "body": "Status: Stable\nLatest: ai@6.0.26 (Jan 2026)"
      },
      {
        "title": "BREAKING: Output API Replaces generateObject/streamObject",
        "body": "⚠️ CRITICAL: generateObject() and streamObject() are DEPRECATED and will be removed in a future version. Use the new Output API instead.\n\nBefore (v5 - DEPRECATED):\n\n// ❌ DEPRECATED - will be removed\nimport { generateObject } from 'ai';\n\nconst result = await generateObject({\n  model: openai('gpt-5'),\n  schema: z.object({ name: z.string(), age: z.number() }),\n  prompt: 'Generate a person',\n});\n\nAfter (v6 - USE THIS):\n\n// ✅ NEW OUTPUT API\nimport { generateText, Output } from 'ai';\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  output: Output.object({ schema: z.object({ name: z.string(), age: z.number() }) }),\n  prompt: 'Generate a person',\n});\n\n// Access the typed object\nconsole.log(result.object); // { name: \"Alice\", age: 30 }"
      },
      {
        "title": "Output Types",
        "body": "import { generateText, Output } from 'ai';\n\n// Object with Zod schema\noutput: Output.object({ schema: myZodSchema })\n\n// Array of typed objects\noutput: Output.array({ schema: personSchema })\n\n// Enum/choice from options\noutput: Output.choice({ choices: ['positive', 'negative', 'neutral'] })\n\n// Plain text (explicit)\noutput: Output.text()\n\n// Unstructured JSON (no schema validation)\noutput: Output.json()"
      },
      {
        "title": "Streaming with Output API",
        "body": "import { streamText, Output } from 'ai';\n\nconst result = streamText({\n  model: openai('gpt-5'),\n  output: Output.object({ schema: personSchema }),\n  prompt: 'Generate a person',\n});\n\n// Stream partial objects\nfor await (const partialObject of result.objectStream) {\n  console.log(partialObject); // { name: \"Ali...\" } -> { name: \"Alice\", age: ... }\n}\n\n// Get final object\nconst finalObject = await result.object;"
      },
      {
        "title": "v6 New Features",
        "body": "1. Agent Abstraction\nUnified interface for building agents with ToolLoopAgent class:\n\nFull control over execution flow, tool loops, and state management\nReplaces manual tool calling orchestration\n\n2. Tool Execution Approval (Human-in-the-Loop)\n\nUse selective approval for better UX. Not every tool call needs approval.\n\ntools: {\n  payment: tool({\n    // Dynamic approval based on input\n    needsApproval: async ({ amount }) => amount > 1000,\n    inputSchema: z.object({ amount: z.number() }),\n    execute: async ({ amount }) => { /* process payment */ },\n  }),\n\n  readFile: tool({\n    needsApproval: false, // Safe operations don't need approval\n    inputSchema: z.object({ path: z.string() }),\n    execute: async ({ path }) => fs.readFile(path),\n  }),\n\n  deleteFile: tool({\n    needsApproval: true, // Destructive operations always need approval\n    inputSchema: z.object({ path: z.string() }),\n    execute: async ({ path }) => fs.unlink(path),\n  }),\n}\n\nBest Practices:\n\nUse dynamic approval for operations where risk depends on parameters (e.g., payment amount)\nAlways require approval for destructive operations (delete, modify, purchase)\nDon't require approval for safe read operations\nAdd system instruction: \"When a tool execution is not approved, do not retry it\"\nImplement timeout for approval requests to prevent stuck states\nStore user preferences for repeat actions\n\nSources:\n\nNext.js Human-in-the-Loop Guide\nCloudflare Agents Human-in-the-Loop\nPermit.io Best Practices\n\n3. Reranking for RAG\n\nimport { rerank } from 'ai';\n\nconst result = await rerank({\n  model: cohere.reranker('rerank-v3.5'),\n  query: 'user question',\n  documents: searchResults,\n  topK: 5,\n});\n\n4. MCP Tools (Model Context Protocol)\n\n⚠️ SECURITY WARNING: MCP tools have significant production risks. See security section below.\n\nimport { experimental_createMCPClient } from 'ai';\n\nconst mcpClient = await experimental_createMCPClient({\n  transport: { type: 'stdio', command: 'npx', args: ['-y', '@modelcontextprotocol/server-filesystem'] },\n});\n\nconst tools = await mcpClient.tools();\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  tools,\n  prompt: 'List files in the current directory',\n});\n\nKnown Issue: MCP tools may not execute in streaming mode (Vercel Community Discussion). Use generateText() instead of streamText() for MCP tools.\n\nMCP Security Considerations\n\n⚠️ CRITICAL: Dynamic MCP tools in production have security risks:\n\nRisks:\n\nTool definitions become part of your agent's prompt\nCan change unexpectedly without warning\nCompromised MCP server can inject malicious prompts\nNew tools can escalate user privileges (e.g., adding delete to read-only server)\n\nSolution - Use Static Tool Generation:\n\n// ❌ RISKY: Dynamic tools change without your control\nconst mcpClient = await experimental_createMCPClient({ /* ... */ });\nconst tools = await mcpClient.tools(); // Can change anytime!\n\n// ✅ SAFE: Generate static, versioned tool definitions\n// Step 1: Install mcp-to-ai-sdk\nnpm install -g mcp-to-ai-sdk\n\n// Step 2: Generate static tools (one-time, version controlled)\nnpx mcp-to-ai-sdk generate stdio 'npx -y @modelcontextprotocol/server-filesystem'\n\n// Step 3: Import static tools\nimport { tools } from './generated-mcp-tools';\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  tools, // Static, reviewed, versioned\n  prompt: 'Use tools',\n});\n\nBest Practice: Generate static tools, review them, commit to version control, and only update intentionally.\n\nSource: Vercel Blog: MCP Security\n\n5. Language Model Middleware\n\nimport { wrapLanguageModel, extractReasoningMiddleware } from 'ai';\n\nconst wrappedModel = wrapLanguageModel({\n  model: anthropic('claude-sonnet-4-5-20250929'),\n  middleware: extractReasoningMiddleware({ tagName: 'think' }),\n});\n\n// Reasoning extracted automatically from <think>...</think> tags\n\n6. Telemetry (OpenTelemetry)\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  prompt: 'Hello',\n  experimental_telemetry: {\n    isEnabled: true,\n    functionId: 'my-chat-function',\n    metadata: { userId: '123' },\n    recordInputs: true,\n    recordOutputs: true,\n  },\n});\n\nOfficial Docs: https://ai-sdk.dev/docs"
      },
      {
        "title": "OpenAI",
        "body": "GPT-5.2 (Dec 2025):\n\n400k context window, 128k output tokens\nEnhanced reasoning capabilities\nAvailable in API platform\n\nGPT-5.1 (Nov 2025):\n\nImproved speed and efficiency over GPT-5\n\"Warmer\" and more intelligent responses\n\nGPT-5 (Aug 2025):\n\n45% less hallucination than GPT-4o\nState-of-the-art in math, coding, visual perception\n\no3 Reasoning Models (Dec 2025):\n\no3, o3-pro, o3-mini - Advanced reasoning\no4-mini - Fast reasoning\n\nimport { openai } from '@ai-sdk/openai';\nconst gpt52 = openai('gpt-5.2');\nconst gpt51 = openai('gpt-5.1');\nconst gpt5 = openai('gpt-5');\nconst o3 = openai('o3');\nconst o3mini = openai('o3-mini');"
      },
      {
        "title": "Anthropic",
        "body": "Claude 4 Family (May-Oct 2025):\n\nOpus 4 (May 22): Best for complex reasoning, $15/$75 per million tokens\nSonnet 4 (May 22): Balanced performance, $3/$15 per million tokens\nOpus 4.1 (Aug 5): Enhanced agentic tasks, real-world coding\nSonnet 4.5 (Sept 29): Most capable for coding, agents, computer use\nHaiku 4.5 (Oct 15): Small, fast, low-latency model\n\nimport { anthropic } from '@ai-sdk/anthropic';\nconst sonnet45 = anthropic('claude-sonnet-4-5-20250929');  // Latest\nconst opus41 = anthropic('claude-opus-4-1-20250805');\nconst haiku45 = anthropic('claude-haiku-4-5-20251015');"
      },
      {
        "title": "Google",
        "body": "Gemini 2.5 Family (Mar-Sept 2025):\n\nPro (March 2025): Most intelligent, #1 on LMArena at launch\nPro Deep Think (May 2025): Enhanced reasoning mode\nFlash (May 2025): Fast, cost-effective\nFlash-Lite (Sept 2025): Updated efficiency\n\nimport { google } from '@ai-sdk/google';\nconst pro = google('gemini-2.5-pro');\nconst flash = google('gemini-2.5-flash');\nconst lite = google('gemini-2.5-flash-lite');"
      },
      {
        "title": "Text Generation",
        "body": "generateText() - Text completion with tools\nstreamText() - Real-time streaming"
      },
      {
        "title": "Structured Output (v6 Output API)",
        "body": "Output.object() - Typed objects with Zod schema (replaces generateObject)\nOutput.array() - Typed arrays\nOutput.choice() - Enum selection\nOutput.json() - Unstructured JSON\n\nSee \"AI SDK 6\" section above for usage examples."
      },
      {
        "title": "Multi-Modal Capabilities",
        "body": "Speech Synthesis (Text-to-Speech)\n\nimport { experimental_generateSpeech as generateSpeech } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\nconst result = await generateSpeech({\n  model: openai.speech('tts-1-hd'),\n  voice: 'alloy',\n  text: 'Hello, how can I help you today?',\n});\n\n// result.audio is an ArrayBuffer containing the audio\nconst audioBuffer = result.audio;\n\nSupported Providers:\n\nOpenAI: tts-1, tts-1-hd, gpt-4o-mini-tts\nElevenLabs: eleven_multilingual_v2, eleven_turbo_v2\nLMNT, Hume\n\nTranscription (Speech-to-Text)\n\nimport { experimental_transcribe as transcribe } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\nconst result = await transcribe({\n  model: openai.transcription('whisper-1'),\n  audio: audioFile, // File, Blob, ArrayBuffer, or URL\n});\n\nconsole.log(result.text); // Transcribed text\nconsole.log(result.segments); // Timestamped segments\n\nSupported Providers:\n\nOpenAI: whisper-1\nElevenLabs, Deepgram, AssemblyAI, Groq, Rev.ai\n\nImage Generation\n\nimport { generateImage } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\nconst result = await generateImage({\n  model: openai.image('dall-e-3'),\n  prompt: 'A futuristic city at sunset',\n  size: '1024x1024',\n  n: 1,\n});\n\n// result.images is an array of generated images\nconst imageUrl = result.images[0].url;\nconst imageBase64 = result.images[0].base64;\n\nSupported Providers:\n\nOpenAI: dall-e-2, dall-e-3\nGoogle: imagen-3.0\nFal AI, Black Forest Labs (Flux), Luma AI, Replicate\n\nEmbeddings\n\nimport { embed, embedMany, cosineSimilarity } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\n// Single embedding\nconst result = await embed({\n  model: openai.embedding('text-embedding-3-small'),\n  value: 'Hello world',\n});\nconsole.log(result.embedding); // number[]\n\n// Multiple embeddings (parallel processing)\nconst results = await embedMany({\n  model: openai.embedding('text-embedding-3-small'),\n  values: ['Hello', 'World', 'AI'],\n  maxParallelCalls: 5, // Parallel processing\n});\n\n// Compare similarity\nconst similarity = cosineSimilarity(\n  results.embeddings[0],\n  results.embeddings[1]\n);\nconsole.log(`Similarity: ${similarity}`); // 0.0 to 1.0\n\nSupported Providers:\n\nOpenAI: text-embedding-3-small, text-embedding-3-large\nGoogle: text-embedding-004\nCohere, Voyage AI, Mistral, Amazon Bedrock\n\nMulti-Modal Prompts (Files, Images, PDFs)\n\nimport { generateText } from 'ai';\nimport { google } from '@ai-sdk/google';\n\nconst result = await generateText({\n  model: google('gemini-2.5-pro'),\n  messages: [{\n    role: 'user',\n    content: [\n      { type: 'text', text: 'Summarize this document' },\n      { type: 'file', data: pdfBuffer, mimeType: 'application/pdf' },\n    ],\n  }],\n});\n\n// Or with images\nconst result = await generateText({\n  model: openai('gpt-5'),\n  messages: [{\n    role: 'user',\n    content: [\n      { type: 'text', text: 'What is in this image?' },\n      { type: 'image', image: imageBuffer },\n    ],\n  }],\n});\n\nSee official docs for full API: https://ai-sdk.dev/docs/ai-sdk-core"
      },
      {
        "title": "v5 Stream Response Methods",
        "body": "When returning streaming responses from an API, use the correct method:\n\nMethodOutput FormatUse CasetoTextStreamResponse()Plain text chunksSimple text streamingtoUIMessageStreamResponse()SSE with JSON eventsChat UIs (text-start, text-delta, text-end, finish)\n\nFor chat widgets and UIs, always use toUIMessageStreamResponse():\n\nconst result = streamText({\n  model: workersai('@cf/qwen/qwen3-30b-a3b-fp8'),\n  messages,\n  system: 'You are helpful.',\n});\n\n// ✅ For chat UIs - returns SSE with JSON events\nreturn result.toUIMessageStreamResponse({\n  headers: { 'Access-Control-Allow-Origin': '*' },\n});\n\n// ❌ For simple text - returns plain text chunks only\nreturn result.toTextStreamResponse();\n\nNote: toDataStreamResponse() does NOT exist in AI SDK v5 (common misconception)."
      },
      {
        "title": "workers-ai-provider Version Compatibility",
        "body": "IMPORTANT: workers-ai-provider@2.x requires AI SDK v5, NOT v4.\n\n# ✅ Correct - AI SDK v5 with workers-ai-provider v2\nnpm install ai@^5.0.0 workers-ai-provider@^2.0.0 zod@^3.25.0\n\n# ❌ Wrong - AI SDK v4 causes error\nnpm install ai@^4.0.0 workers-ai-provider@^2.0.0\n# Error: \"AI SDK 4 only supports models that implement specification version v1\"\n\nZod Version: AI SDK v5 requires zod@^3.25.0 or later for zod/v3 and zod/v4 exports. Older versions (3.22.x) cause build errors: \"Could not resolve zod/v4\"."
      },
      {
        "title": "Cloudflare Workers Startup Fix",
        "body": "Problem: AI SDK v5 + Zod causes >270ms startup time (exceeds Workers 400ms limit).\n\nSolution:\n\n// ❌ BAD: Top-level imports cause startup overhead\nimport { createWorkersAI } from 'workers-ai-provider';\nconst workersai = createWorkersAI({ binding: env.AI });\n\n// ✅ GOOD: Lazy initialization inside handler\napp.post('/chat', async (c) => {\n  const { createWorkersAI } = await import('workers-ai-provider');\n  const workersai = createWorkersAI({ binding: c.env.AI });\n  // ...\n});\n\nAdditional:\n\nMinimize top-level Zod schemas\nMove complex schemas into route handlers\nMonitor startup time with Wrangler"
      },
      {
        "title": "v5 Tool Calling Changes",
        "body": "Breaking Changes:\n\nparameters → inputSchema (Zod schema)\nTool properties: args → input, result → output\nToolExecutionError removed (now tool-error content parts)\nmaxSteps parameter removed → Use stopWhen(stepCountIs(n))\n\nNew in v5:\n\nDynamic tools (add tools at runtime based on context)\nAgent class (multi-step execution with tools)"
      },
      {
        "title": "Critical v4→v5 Migration",
        "body": "AI SDK v5 introduced extensive breaking changes. If migrating from v4, follow this guide."
      },
      {
        "title": "Breaking Changes Overview",
        "body": "Parameter Renames\n\nmaxTokens → maxOutputTokens\nproviderMetadata → providerOptions\n\n\n\nTool Definitions\n\nparameters → inputSchema\nTool properties: args → input, result → output\n\n\n\nMessage Types\n\nCoreMessage → ModelMessage\nMessage → UIMessage\nconvertToCoreMessages → convertToModelMessages\n\n\n\nTool Error Handling\n\nToolExecutionError class removed\nNow tool-error content parts\nEnables automated retry\n\n\n\nMulti-Step Execution\n\nmaxSteps → stopWhen\nUse stepCountIs() or hasToolCall()\n\n\n\nMessage Structure\n\nSimple content string → parts array\nParts: text, file, reasoning, tool-call, tool-result\n\n\n\nStreaming Architecture\n\nSingle chunk → start/delta/end lifecycle\nUnique IDs for concurrent streams\n\n\n\nTool Streaming\n\nEnabled by default\ntoolCallStreaming option removed\n\n\n\nPackage Reorganization\n\nai/rsc → @ai-sdk/rsc\nai/react → @ai-sdk/react\nLangChainAdapter → @ai-sdk/langchain"
      },
      {
        "title": "Migration Examples",
        "body": "Before (v4):\n\nimport { generateText } from 'ai';\n\nconst result = await generateText({\n  model: openai.chat('gpt-4-turbo'),\n  maxTokens: 500,\n  providerMetadata: { openai: { user: 'user-123' } },\n  tools: {\n    weather: {\n      description: 'Get weather',\n      parameters: z.object({ location: z.string() }),\n      execute: async (args) => { /* args.location */ },\n    },\n  },\n  maxSteps: 5,\n});\n\nAfter (v5):\n\nimport { generateText, tool, stopWhen, stepCountIs } from 'ai';\n\nconst result = await generateText({\n  model: openai('gpt-4-turbo'),\n  maxOutputTokens: 500,\n  providerOptions: { openai: { user: 'user-123' } },\n  tools: {\n    weather: tool({\n      description: 'Get weather',\n      inputSchema: z.object({ location: z.string() }),\n      execute: async ({ location }) => { /* input.location */ },\n    }),\n  },\n  stopWhen: stepCountIs(5),\n});"
      },
      {
        "title": "Migration Checklist",
        "body": "Update all maxTokens to maxOutputTokens\n Update providerMetadata to providerOptions\n Convert tool parameters to inputSchema\n Update tool execute functions: args → input\n Replace maxSteps with stopWhen(stepCountIs(n))\n Update message types: CoreMessage → ModelMessage\n Remove ToolExecutionError handling\n Update package imports (ai/rsc → @ai-sdk/rsc)\n Test streaming behavior (architecture changed)\n Update TypeScript types"
      },
      {
        "title": "Automated Migration",
        "body": "AI SDK provides a migration tool:\n\nnpx ai migrate\n\nThis will update most breaking changes automatically. Review changes carefully.\n\nOfficial Migration Guide:\nhttps://ai-sdk.dev/docs/migration-guides/migration-guide-5-0"
      },
      {
        "title": "1. AI_APICallError",
        "body": "Cause: API request failed (network, auth, rate limit).\n\nSolution:\n\nimport { AI_APICallError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n  });\n} catch (error) {\n  if (error instanceof AI_APICallError) {\n    console.error('API call failed:', error.message);\n    console.error('Status code:', error.statusCode);\n    console.error('Response:', error.responseBody);\n\n    // Check common causes\n    if (error.statusCode === 401) {\n      // Invalid API key\n    } else if (error.statusCode === 429) {\n      // Rate limit - implement backoff\n    } else if (error.statusCode >= 500) {\n      // Provider issue - retry\n    }\n  }\n}\n\nPrevention:\n\nValidate API keys at startup\nImplement retry logic with exponential backoff\nMonitor rate limits\nHandle network errors gracefully"
      },
      {
        "title": "2. AI_NoObjectGeneratedError",
        "body": "Cause: Model didn't generate valid object matching schema.\n\nSolution:\n\nimport { AI_NoObjectGeneratedError } from 'ai';\n\ntry {\n  const result = await generateObject({\n    model: openai('gpt-4-turbo'),\n    schema: z.object({ /* complex schema */ }),\n    prompt: 'Generate data',\n  });\n} catch (error) {\n  if (error instanceof AI_NoObjectGeneratedError) {\n    console.error('No valid object generated');\n\n    // Solutions:\n    // 1. Simplify schema\n    // 2. Add more context to prompt\n    // 3. Provide examples in prompt\n    // 4. Try different model (gpt-5 or claude-sonnet-4-5 for complex objects)\n  }\n}\n\nPrevention:\n\nStart with simple schemas, add complexity incrementally\nInclude examples in prompt: \"Generate a person like: { name: 'Alice', age: 30 }\"\nUse GPT-4 for complex structured output\nTest schemas with sample data first"
      },
      {
        "title": "3. Worker Startup Limit (270ms+)",
        "body": "Cause: AI SDK v5 + Zod initialization overhead in Cloudflare Workers exceeds startup limits.\n\nSolution:\n\n// BAD: Top-level imports cause startup overhead\nimport { createWorkersAI } from 'workers-ai-provider';\nimport { complexSchema } from './schemas';\n\nconst workersai = createWorkersAI({ binding: env.AI });\n\n// GOOD: Lazy initialization inside handler\nexport default {\n  async fetch(request, env) {\n    const { createWorkersAI } = await import('workers-ai-provider');\n    const workersai = createWorkersAI({ binding: env.AI });\n\n    // Use workersai here\n  }\n}\n\nPrevention:\n\nMove AI SDK imports inside route handlers\nMinimize top-level Zod schemas\nMonitor Worker startup time (must be <400ms)\nUse Wrangler's startup time reporting\n\nGitHub Issue: Search for \"Workers startup limit\" in Vercel AI SDK issues"
      },
      {
        "title": "4. streamText Fails Silently",
        "body": "Cause: Stream errors can be swallowed by createDataStreamResponse.\n\nStatus: ✅ RESOLVED - Fixed in ai@4.1.22 (February 2025)\n\nSolution (Recommended):\n\n// Use the onError callback (added in v4.1.22)\nconst stream = streamText({\n  model: openai('gpt-4-turbo'),\n  prompt: 'Hello',\n  onError({ error }) {\n    console.error('Stream error:', error);\n    // Custom error logging and handling\n  },\n});\n\n// Stream safely\nfor await (const chunk of stream.textStream) {\n  process.stdout.write(chunk);\n}\n\nAlternative (Manual try-catch):\n\n// Fallback if not using onError callback\ntry {\n  const stream = streamText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n  });\n\n  for await (const chunk of stream.textStream) {\n    process.stdout.write(chunk);\n  }\n} catch (error) {\n  console.error('Stream error:', error);\n}\n\nPrevention:\n\nUse onError callback for proper error capture (recommended)\nImplement server-side error monitoring\nTest stream error handling explicitly\nAlways log on server side in production\n\nGitHub Issue: #4726 (RESOLVED)"
      },
      {
        "title": "5. AI_LoadAPIKeyError",
        "body": "Cause: Missing or invalid API key.\n\nSolution:\n\nimport { AI_LoadAPIKeyError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n  });\n} catch (error) {\n  if (error instanceof AI_LoadAPIKeyError) {\n    console.error('API key error:', error.message);\n\n    // Check:\n    // 1. .env file exists and loaded\n    // 2. Correct env variable name (OPENAI_API_KEY)\n    // 3. Key format is valid (starts with sk-)\n  }\n}\n\nPrevention:\n\nValidate API keys at application startup\nUse environment variable validation (e.g., zod)\nProvide clear error messages in development\nDocument required environment variables"
      },
      {
        "title": "6. AI_InvalidArgumentError",
        "body": "Cause: Invalid parameters passed to function.\n\nSolution:\n\nimport { AI_InvalidArgumentError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    maxOutputTokens: -1,  // Invalid!\n    prompt: 'Hello',\n  });\n} catch (error) {\n  if (error instanceof AI_InvalidArgumentError) {\n    console.error('Invalid argument:', error.message);\n    // Check parameter types and values\n  }\n}\n\nPrevention:\n\nUse TypeScript for type checking\nValidate inputs before calling AI SDK functions\nRead function signatures carefully\nCheck official docs for parameter constraints"
      },
      {
        "title": "7. AI_NoContentGeneratedError",
        "body": "Cause: Model generated no content (safety filters, etc.).\n\nSolution:\n\nimport { AI_NoContentGeneratedError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Some prompt',\n  });\n} catch (error) {\n  if (error instanceof AI_NoContentGeneratedError) {\n    console.error('No content generated');\n\n    // Possible causes:\n    // 1. Safety filters blocked output\n    // 2. Prompt triggered content policy\n    // 3. Model configuration issue\n\n    // Handle gracefully:\n    return { text: 'Unable to generate response. Please try different input.' };\n  }\n}\n\nPrevention:\n\nSanitize user inputs\nAvoid prompts that may trigger safety filters\nHave fallback messaging\nLog occurrences for analysis"
      },
      {
        "title": "8. AI_TypeValidationError",
        "body": "Cause: Zod schema validation failed on generated output.\n\nSolution:\n\nimport { AI_TypeValidationError } from 'ai';\n\ntry {\n  const result = await generateObject({\n    model: openai('gpt-4-turbo'),\n    schema: z.object({\n      age: z.number().min(0).max(120),  // Strict validation\n    }),\n    prompt: 'Generate person',\n  });\n} catch (error) {\n  if (error instanceof AI_TypeValidationError) {\n    console.error('Validation failed:', error.message);\n\n    // Solutions:\n    // 1. Relax schema constraints\n    // 2. Add more guidance in prompt\n    // 3. Use .optional() for unreliable fields\n  }\n}\n\nPrevention:\n\nStart with lenient schemas, tighten gradually\nUse .optional() for fields that may not always be present\nAdd validation hints in field descriptions\nTest with various prompts"
      },
      {
        "title": "9. AI_RetryError",
        "body": "Cause: All retry attempts failed.\n\nSolution:\n\nimport { AI_RetryError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n    maxRetries: 3,  // Default is 2\n  });\n} catch (error) {\n  if (error instanceof AI_RetryError) {\n    console.error('All retries failed');\n    console.error('Last error:', error.lastError);\n\n    // Check root cause:\n    // - Persistent network issue\n    // - Provider outage\n    // - Invalid configuration\n  }\n}\n\nPrevention:\n\nInvestigate root cause of failures\nAdjust retry configuration if needed\nImplement circuit breaker pattern for provider outages\nHave fallback providers"
      },
      {
        "title": "10. Rate Limiting Errors",
        "body": "Cause: Exceeded provider rate limits (RPM/TPM).\n\nSolution:\n\n// Implement exponential backoff\nasync function generateWithBackoff(prompt: string, retries = 3) {\n  for (let i = 0; i < retries; i++) {\n    try {\n      return await generateText({\n        model: openai('gpt-4-turbo'),\n        prompt,\n      });\n    } catch (error) {\n      if (error instanceof AI_APICallError && error.statusCode === 429) {\n        const delay = Math.pow(2, i) * 1000;  // Exponential backoff\n        console.log(`Rate limited, waiting ${delay}ms`);\n        await new Promise(resolve => setTimeout(resolve, delay));\n      } else {\n        throw error;\n      }\n    }\n  }\n  throw new Error('Rate limit retries exhausted');\n}\n\nPrevention:\n\nMonitor rate limit headers\nQueue requests to stay under limits\nUpgrade provider tier if needed\nImplement request throttling"
      },
      {
        "title": "11. TypeScript Performance with Zod",
        "body": "Cause: Complex Zod schemas slow down TypeScript type checking.\n\nSolution:\n\n// Instead of deeply nested schemas at top level:\n// const complexSchema = z.object({ /* 100+ fields */ });\n\n// Define inside functions or use type assertions:\nfunction generateData() {\n  const schema = z.object({ /* complex schema */ });\n  return generateObject({ model: openai('gpt-4-turbo'), schema, prompt: '...' });\n}\n\n// Or use z.lazy() for recursive schemas:\ntype Category = { name: string; subcategories?: Category[] };\nconst CategorySchema: z.ZodType<Category> = z.lazy(() =>\n  z.object({\n    name: z.string(),\n    subcategories: z.array(CategorySchema).optional(),\n  })\n);\n\nPrevention:\n\nAvoid top-level complex schemas\nUse z.lazy() for recursive types\nSplit large schemas into smaller ones\nUse type assertions where appropriate\n\nOfficial Docs:\nhttps://ai-sdk.dev/docs/troubleshooting/common-issues/slow-type-checking"
      },
      {
        "title": "12. Invalid JSON Response (Provider-Specific)",
        "body": "Cause: Some models occasionally return invalid JSON.\n\nSolution:\n\n// Use built-in retry and mode selection\nconst result = await generateObject({\n  model: openai('gpt-4-turbo'),\n  schema: mySchema,\n  prompt: 'Generate data',\n  mode: 'json',  // Force JSON mode (supported by GPT-4)\n  maxRetries: 3,  // Retry on invalid JSON\n});\n\n// Or catch and retry manually:\ntry {\n  const result = await generateObject({\n    model: openai('gpt-4-turbo'),\n    schema: mySchema,\n    prompt: 'Generate data',\n  });\n} catch (error) {\n  // Retry with different model\n  const result = await generateObject({\n    model: openai('gpt-4-turbo'),\n    schema: mySchema,\n    prompt: 'Generate data',\n  });\n}\n\nPrevention:\n\nUse mode: 'json' when available\nPrefer GPT-4 for structured output\nImplement retry logic\nValidate responses\n\nGitHub Issue: #4302 (Imagen 3.0 Invalid JSON)"
      },
      {
        "title": "13. Gemini Implicit Caching Fails with Tools",
        "body": "Error: No error, but higher API costs due to disabled caching\nCause: Google Gemini 3 Flash's cost-saving implicit caching doesn't work when any tools are defined, even if never used.\nSource: GitHub Issue #11513\n\nWhy It Happens: Gemini API disables caching when tools are present in the request, regardless of whether they're invoked.\n\nPrevention:\n\n// Conditionally add tools only when needed\nconst needsTools = await analyzePrompt(userInput);\n\nconst result = await generateText({\n  model: google('gemini-3-flash'),\n  tools: needsTools ? { weather: weatherTool } : undefined,\n  prompt: userInput,\n});\n\nImpact: High - Can significantly increase API costs for repeated context"
      },
      {
        "title": "14. Anthropic Tool Error Results Cause JSON Parse Crash",
        "body": "Error: SyntaxError: \"[object Object]\" is not valid JSON\nCause: Anthropic provider built-in tools (web_fetch, etc.) return error objects that SDK tries to JSON.parse\nSource: GitHub Issue #11856\n\nWhy It Happens: When Anthropic built-in tools fail (e.g., url_not_allowed), they return error objects. AI SDK incorrectly tries to parse these as JSON strings.\n\nPrevention:\n\ntry {\n  const result = await generateText({\n    model: anthropic('claude-sonnet-4-5-20250929'),\n    tools: { web_fetch: { type: 'anthropic_defined', name: 'web_fetch' } },\n    prompt: userPrompt,\n  });\n} catch (error) {\n  if (error.message.includes('is not valid JSON')) {\n    // Tool returned error result, handle gracefully\n    console.error('Tool execution failed - likely blocked URL or permission issue');\n    // Retry without tool or use custom tool\n  }\n  throw error;\n}\n\nImpact: High - Production crashes when using Anthropic built-in tools"
      },
      {
        "title": "15. Tool-Result in Assistant Message (Anthropic)",
        "body": "Error: Anthropic API error - tool-result in assistant message not allowed\nCause: Server-executed tools incorrectly place tool-result parts in assistant messages\nSource: GitHub Issue #11855\n\nWhy It Happens: When using server-executed tools (tools where execute runs on server, not sent to model), the AI SDK incorrectly includes tool-result parts in the assistant message. Anthropic expects tool-result only in user messages.\n\nPrevention:\n\n// Workaround: Filter messages before sending\nconst filteredMessages = messages.map(msg => {\n  if (msg.role === 'assistant') {\n    return {\n      ...msg,\n      content: msg.content.filter(part => part.type !== 'tool-result'),\n    };\n  }\n  return msg;\n});\n\nconst result = await generateText({\n  model: anthropic('claude-sonnet-4-5-20250929'),\n  tools: { database: databaseTool },\n  messages: filteredMessages,\n  prompt: 'Get user data',\n});\n\nImpact: High - Breaks server-executed tool pattern with Anthropic provider\n\nStatus: Known issue, PR #11854 submitted\n\nMore Errors: https://ai-sdk.dev/docs/reference/ai-sdk-errors (31 total)"
      },
      {
        "title": "useChat Stale Closures with Memoized Options",
        "body": "Issue: When using useChat with memoized options (common for performance), the onData and onFinish callbacks have stale closures and don't see updated state variables.\n\nSource: GitHub Issue #11686\n\nReproduction:\n\nconst [count, setCount] = useState(0);\n\nconst chatOptions = useMemo(() => ({\n  onFinish: (message) => {\n    console.log('Count:', count); // ALWAYS 0, never updates!\n  },\n}), []); // Empty deps = stale closure\n\nconst { messages, append } = useChat(chatOptions);\n\nWorkaround 1 - Don't Memoize Callbacks:\n\nconst { messages, append } = useChat({\n  onFinish: (message) => {\n    console.log('Count:', count); // Now sees current count\n  },\n});\n\nWorkaround 2 - Use useRef:\n\nconst countRef = useRef(count);\nuseEffect(() => { countRef.current = count; }, [count]);\n\nconst chatOptions = useMemo(() => ({\n  onFinish: (message) => {\n    console.log('Count:', countRef.current); // Always current\n  },\n}), []);\n\nFull Repro: https://github.com/alechoey/ai-sdk-stale-ondata-repro"
      },
      {
        "title": "Stream Resumption Fails on Tab Switch",
        "body": "Issue: When users switch browser tabs or background the app during an AI stream, the stream does not resume when they return. The connection is lost and does not automatically reconnect.\n\nSource: GitHub Issue #11865\n\nImpact: High - Major UX issue for long-running streams\n\nWorkaround 1 - Implement onError Handler:\n\nconst { messages, append, reload } = useChat({\n  api: '/api/chat',\n  onError: (error) => {\n    if (error.message.includes('stream') || error.message.includes('aborted')) {\n      // Attempt to reload last message\n      reload();\n    }\n  },\n});\n\nWorkaround 2 - Detect Visibility Change:\n\nuseEffect(() => {\n  const handleVisibilityChange = () => {\n    if (document.visibilityState === 'visible') {\n      // Check if stream was interrupted\n      const lastMessage = messages[messages.length - 1];\n      if (lastMessage?.role === 'assistant' && !lastMessage.content) {\n        reload();\n      }\n    }\n  };\n\n  document.addEventListener('visibilitychange', handleVisibilityChange);\n  return () => document.removeEventListener('visibilitychange', handleVisibilityChange);\n}, [messages, reload]);\n\nStatus: Known limitation, no auto-reconnection built-in"
      },
      {
        "title": "Use ai-sdk-core when:",
        "body": "Building backend AI features (server-side text generation)\nImplementing server-side text generation (Node.js, Workers, Next.js)\nCreating structured AI outputs (JSON, forms, data extraction)\nBuilding AI agents with tools (multi-step workflows)\nIntegrating multiple AI providers (OpenAI, Anthropic, Google, Cloudflare)\nMigrating from AI SDK v4 to v5\nEncountering AI SDK errors (AI_APICallError, AI_NoObjectGeneratedError, etc.)\nUsing AI in Cloudflare Workers (with workers-ai-provider)\nUsing AI in Next.js Server Components/Actions\nNeed consistent API across different LLM providers"
      },
      {
        "title": "Don't use this skill when:",
        "body": "Building React chat UIs (use ai-sdk-ui skill instead)\nNeed frontend hooks like useChat (use ai-sdk-ui skill instead)\nNeed advanced topics like embeddings or image generation (check official docs)\nBuilding native Cloudflare Workers AI apps without multi-provider (use cloudflare-workers-ai skill instead)\nNeed Generative UI / RSC (see https://ai-sdk.dev/docs/ai-sdk-rsc)"
      },
      {
        "title": "Versions",
        "body": "AI SDK:\n\nStable: ai@6.0.26 (Jan 2026)\n⚠️ Skip v6.0.40 - Breaking streaming change (reverted in v6.0.41)\nLegacy v5: ai@5.0.117 (ai-v5 tag)\nZod 3.x/4.x both supported\n\nLatest Models (2026):\n\nOpenAI: GPT-5.2, GPT-5.1, GPT-5, o3, o3-mini, o4-mini\nAnthropic: Claude Sonnet 4.5, Opus 4.1, Haiku 4.5\nGoogle: Gemini 2.5 Pro/Flash/Lite\n\nCheck Latest:\n\nnpm view ai version\nnpm view ai dist-tags"
      },
      {
        "title": "Official Docs",
        "body": "Core:\n\nAI SDK v6: https://ai-sdk.dev/docs\nAI SDK Core: https://ai-sdk.dev/docs/ai-sdk-core/overview\nOutput API: https://ai-sdk.dev/docs/ai-sdk-core/generating-structured-data\nv4→v5 Migration: https://ai-sdk.dev/docs/migration-guides/migration-guide-5-0\nAll Errors (31): https://ai-sdk.dev/docs/reference/ai-sdk-errors\nProviders (69+): https://ai-sdk.dev/providers/overview\n\nMulti-Modal:\n\nSpeech: https://ai-sdk.dev/docs/ai-sdk-core/speech\nTranscription: https://ai-sdk.dev/docs/ai-sdk-core/transcription\nImage Generation: https://ai-sdk.dev/docs/ai-sdk-core/image-generation\nEmbeddings: https://ai-sdk.dev/docs/ai-sdk-core/embeddings\n\nGitHub:\n\nRepository: https://github.com/vercel/ai\nIssues: https://github.com/vercel/ai/issues\n\nLast Updated: 2026-01-20\nSkill Version: 2.1.0\nChanges: Added 3 new errors (Gemini caching, Anthropic tool errors, tool-result placement), MCP security guidance, tool approval best practices, React hooks edge cases, stream resumption workarounds\nAI SDK: 6.0.26 stable (avoid v6.0.40)"
      }
    ],
    "body": "AI SDK Core\n\nBackend AI with Vercel AI SDK v5 and v6.\n\nInstallation:\n\nnpm install ai @ai-sdk/openai @ai-sdk/anthropic @ai-sdk/google zod\n\nAI SDK 6 (Stable - January 2026)\n\nStatus: Stable Latest: ai@6.0.26 (Jan 2026)\n\nBREAKING: Output API Replaces generateObject/streamObject\n\n⚠️ CRITICAL: generateObject() and streamObject() are DEPRECATED and will be removed in a future version. Use the new Output API instead.\n\nBefore (v5 - DEPRECATED):\n\n// ❌ DEPRECATED - will be removed\nimport { generateObject } from 'ai';\n\nconst result = await generateObject({\n  model: openai('gpt-5'),\n  schema: z.object({ name: z.string(), age: z.number() }),\n  prompt: 'Generate a person',\n});\n\n\nAfter (v6 - USE THIS):\n\n// ✅ NEW OUTPUT API\nimport { generateText, Output } from 'ai';\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  output: Output.object({ schema: z.object({ name: z.string(), age: z.number() }) }),\n  prompt: 'Generate a person',\n});\n\n// Access the typed object\nconsole.log(result.object); // { name: \"Alice\", age: 30 }\n\nOutput Types\nimport { generateText, Output } from 'ai';\n\n// Object with Zod schema\noutput: Output.object({ schema: myZodSchema })\n\n// Array of typed objects\noutput: Output.array({ schema: personSchema })\n\n// Enum/choice from options\noutput: Output.choice({ choices: ['positive', 'negative', 'neutral'] })\n\n// Plain text (explicit)\noutput: Output.text()\n\n// Unstructured JSON (no schema validation)\noutput: Output.json()\n\nStreaming with Output API\nimport { streamText, Output } from 'ai';\n\nconst result = streamText({\n  model: openai('gpt-5'),\n  output: Output.object({ schema: personSchema }),\n  prompt: 'Generate a person',\n});\n\n// Stream partial objects\nfor await (const partialObject of result.objectStream) {\n  console.log(partialObject); // { name: \"Ali...\" } -> { name: \"Alice\", age: ... 
}\n}\n\n// Get final object\nconst finalObject = await result.object;\n\nv6 New Features\n\n1. Agent Abstraction Unified interface for building agents with ToolLoopAgent class:\n\nFull control over execution flow, tool loops, and state management\nReplaces manual tool calling orchestration\n\n2. Tool Execution Approval (Human-in-the-Loop)\n\nUse selective approval for better UX. Not every tool call needs approval.\n\ntools: {\n  payment: tool({\n    // Dynamic approval based on input\n    needsApproval: async ({ amount }) => amount > 1000,\n    inputSchema: z.object({ amount: z.number() }),\n    execute: async ({ amount }) => { /* process payment */ },\n  }),\n\n  readFile: tool({\n    needsApproval: false, // Safe operations don't need approval\n    inputSchema: z.object({ path: z.string() }),\n    execute: async ({ path }) => fs.readFile(path),\n  }),\n\n  deleteFile: tool({\n    needsApproval: true, // Destructive operations always need approval\n    inputSchema: z.object({ path: z.string() }),\n    execute: async ({ path }) => fs.unlink(path),\n  }),\n}\n\n\nBest Practices:\n\nUse dynamic approval for operations where risk depends on parameters (e.g., payment amount)\nAlways require approval for destructive operations (delete, modify, purchase)\nDon't require approval for safe read operations\nAdd system instruction: \"When a tool execution is not approved, do not retry it\"\nImplement timeout for approval requests to prevent stuck states\nStore user preferences for repeat actions\n\nSources:\n\nNext.js Human-in-the-Loop Guide\nCloudflare Agents Human-in-the-Loop\nPermit.io Best Practices\n\n3. Reranking for RAG\n\nimport { rerank } from 'ai';\n\nconst result = await rerank({\n  model: cohere.reranker('rerank-v3.5'),\n  query: 'user question',\n  documents: searchResults,\n  topK: 5,\n});\n\n\n4. MCP Tools (Model Context Protocol)\n\n⚠️ SECURITY WARNING: MCP tools have significant production risks. 
See security section below.\n\nimport { experimental_createMCPClient } from 'ai';\n\nconst mcpClient = await experimental_createMCPClient({\n  transport: { type: 'stdio', command: 'npx', args: ['-y', '@modelcontextprotocol/server-filesystem'] },\n});\n\nconst tools = await mcpClient.tools();\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  tools,\n  prompt: 'List files in the current directory',\n});\n\n\nKnown Issue: MCP tools may not execute in streaming mode (Vercel Community Discussion). Use generateText() instead of streamText() for MCP tools.\n\nMCP Security Considerations\n\n⚠️ CRITICAL: Dynamic MCP tools in production have security risks:\n\nRisks:\n\nTool definitions become part of your agent's prompt\nCan change unexpectedly without warning\nCompromised MCP server can inject malicious prompts\nNew tools can escalate user privileges (e.g., adding delete to read-only server)\n\nSolution - Use Static Tool Generation:\n\n// ❌ RISKY: Dynamic tools change without your control\nconst mcpClient = await experimental_createMCPClient({ /* ... */ });\nconst tools = await mcpClient.tools(); // Can change anytime!\n\n// ✅ SAFE: Generate static, versioned tool definitions\n// Step 1: Install mcp-to-ai-sdk\nnpm install -g mcp-to-ai-sdk\n\n// Step 2: Generate static tools (one-time, version controlled)\nnpx mcp-to-ai-sdk generate stdio 'npx -y @modelcontextprotocol/server-filesystem'\n\n// Step 3: Import static tools\nimport { tools } from './generated-mcp-tools';\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  tools, // Static, reviewed, versioned\n  prompt: 'Use tools',\n});\n\n\nBest Practice: Generate static tools, review them, commit to version control, and only update intentionally.\n\nSource: Vercel Blog: MCP Security\n\n5. 
Language Model Middleware\n\nimport { wrapLanguageModel, extractReasoningMiddleware } from 'ai';\n\nconst wrappedModel = wrapLanguageModel({\n  model: anthropic('claude-sonnet-4-5-20250929'),\n  middleware: extractReasoningMiddleware({ tagName: 'think' }),\n});\n\n// Reasoning extracted automatically from <think>...</think> tags\n\n\n6. Telemetry (OpenTelemetry)\n\nconst result = await generateText({\n  model: openai('gpt-5'),\n  prompt: 'Hello',\n  experimental_telemetry: {\n    isEnabled: true,\n    functionId: 'my-chat-function',\n    metadata: { userId: '123' },\n    recordInputs: true,\n    recordOutputs: true,\n  },\n});\n\n\nOfficial Docs: https://ai-sdk.dev/docs\n\nLatest AI Models (2025-2026)\nOpenAI\n\nGPT-5.2 (Dec 2025):\n\n400k context window, 128k output tokens\nEnhanced reasoning capabilities\nAvailable in API platform\n\nGPT-5.1 (Nov 2025):\n\nImproved speed and efficiency over GPT-5\n\"Warmer\" and more intelligent responses\n\nGPT-5 (Aug 2025):\n\n45% less hallucination than GPT-4o\nState-of-the-art in math, coding, visual perception\n\no3 Reasoning Models (Dec 2025):\n\no3, o3-pro, o3-mini - Advanced reasoning\no4-mini - Fast reasoning\nimport { openai } from '@ai-sdk/openai';\nconst gpt52 = openai('gpt-5.2');\nconst gpt51 = openai('gpt-5.1');\nconst gpt5 = openai('gpt-5');\nconst o3 = openai('o3');\nconst o3mini = openai('o3-mini');\n\nAnthropic\n\nClaude 4 Family (May-Oct 2025):\n\nOpus 4 (May 22): Best for complex reasoning, $15/$75 per million tokens\nSonnet 4 (May 22): Balanced performance, $3/$15 per million tokens\nOpus 4.1 (Aug 5): Enhanced agentic tasks, real-world coding\nSonnet 4.5 (Sept 29): Most capable for coding, agents, computer use\nHaiku 4.5 (Oct 15): Small, fast, low-latency model\nimport { anthropic } from '@ai-sdk/anthropic';\nconst sonnet45 = anthropic('claude-sonnet-4-5-20250929');  // Latest\nconst opus41 = anthropic('claude-opus-4-1-20250805');\nconst haiku45 = anthropic('claude-haiku-4-5-20251015');\n\nGoogle\n\nGemini 
2.5 Family (Mar-Sept 2025):\n\nPro (March 2025): Most intelligent, #1 on LMArena at launch\nPro Deep Think (May 2025): Enhanced reasoning mode\nFlash (May 2025): Fast, cost-effective\nFlash-Lite (Sept 2025): Updated efficiency\nimport { google } from '@ai-sdk/google';\nconst pro = google('gemini-2.5-pro');\nconst flash = google('gemini-2.5-flash');\nconst lite = google('gemini-2.5-flash-lite');\n\nCore Functions\nText Generation\n\ngenerateText() - Text completion with tools streamText() - Real-time streaming\n\nStructured Output (v6 Output API)\n\nOutput.object() - Typed objects with Zod schema (replaces generateObject) Output.array() - Typed arrays Output.choice() - Enum selection Output.json() - Unstructured JSON\n\nSee \"AI SDK 6\" section above for usage examples.\n\nMulti-Modal Capabilities\nSpeech Synthesis (Text-to-Speech)\nimport { experimental_generateSpeech as generateSpeech } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\nconst result = await generateSpeech({\n  model: openai.speech('tts-1-hd'),\n  voice: 'alloy',\n  text: 'Hello, how can I help you today?',\n});\n\n// result.audio is an ArrayBuffer containing the audio\nconst audioBuffer = result.audio;\n\n\nSupported Providers:\n\nOpenAI: tts-1, tts-1-hd, gpt-4o-mini-tts\nElevenLabs: eleven_multilingual_v2, eleven_turbo_v2\nLMNT, Hume\nTranscription (Speech-to-Text)\nimport { experimental_transcribe as transcribe } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\nconst result = await transcribe({\n  model: openai.transcription('whisper-1'),\n  audio: audioFile, // File, Blob, ArrayBuffer, or URL\n});\n\nconsole.log(result.text); // Transcribed text\nconsole.log(result.segments); // Timestamped segments\n\n\nSupported Providers:\n\nOpenAI: whisper-1\nElevenLabs, Deepgram, AssemblyAI, Groq, Rev.ai\nImage Generation\nimport { generateImage } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\nconst result = await generateImage({\n  model: openai.image('dall-e-3'),\n  prompt: 'A 
futuristic city at sunset',\n  size: '1024x1024',\n  n: 1,\n});\n\n// result.images is an array of generated images\nconst imageUrl = result.images[0].url;\nconst imageBase64 = result.images[0].base64;\n\n\nSupported Providers:\n\nOpenAI: dall-e-2, dall-e-3\nGoogle: imagen-3.0\nFal AI, Black Forest Labs (Flux), Luma AI, Replicate\nEmbeddings\nimport { embed, embedMany, cosineSimilarity } from 'ai';\nimport { openai } from '@ai-sdk/openai';\n\n// Single embedding\nconst result = await embed({\n  model: openai.embedding('text-embedding-3-small'),\n  value: 'Hello world',\n});\nconsole.log(result.embedding); // number[]\n\n// Multiple embeddings (parallel processing)\nconst results = await embedMany({\n  model: openai.embedding('text-embedding-3-small'),\n  values: ['Hello', 'World', 'AI'],\n  maxParallelCalls: 5, // Parallel processing\n});\n\n// Compare similarity\nconst similarity = cosineSimilarity(\n  results.embeddings[0],\n  results.embeddings[1]\n);\nconsole.log(`Similarity: ${similarity}`); // 0.0 to 1.0\n\n\nSupported Providers:\n\nOpenAI: text-embedding-3-small, text-embedding-3-large\nGoogle: text-embedding-004\nCohere, Voyage AI, Mistral, Amazon Bedrock\nMulti-Modal Prompts (Files, Images, PDFs)\nimport { generateText } from 'ai';\nimport { google } from '@ai-sdk/google';\n\nconst result = await generateText({\n  model: google('gemini-2.5-pro'),\n  messages: [{\n    role: 'user',\n    content: [\n      { type: 'text', text: 'Summarize this document' },\n      { type: 'file', data: pdfBuffer, mimeType: 'application/pdf' },\n    ],\n  }],\n});\n\n// Or with images\nconst result = await generateText({\n  model: openai('gpt-5'),\n  messages: [{\n    role: 'user',\n    content: [\n      { type: 'text', text: 'What is in this image?' 
},\n      { type: 'image', image: imageBuffer },\n    ],\n  }],\n});\n\n\nSee official docs for full API: https://ai-sdk.dev/docs/ai-sdk-core\n\nv5 Stream Response Methods\n\nWhen returning streaming responses from an API, use the correct method:\n\nMethod\tOutput Format\tUse Case\ntoTextStreamResponse()\tPlain text chunks\tSimple text streaming\ntoUIMessageStreamResponse()\tSSE with JSON events\tChat UIs (text-start, text-delta, text-end, finish)\n\nFor chat widgets and UIs, always use toUIMessageStreamResponse():\n\nconst result = streamText({\n  model: workersai('@cf/qwen/qwen3-30b-a3b-fp8'),\n  messages,\n  system: 'You are helpful.',\n});\n\n// ✅ For chat UIs - returns SSE with JSON events\nreturn result.toUIMessageStreamResponse({\n  headers: { 'Access-Control-Allow-Origin': '*' },\n});\n\n// ❌ For simple text - returns plain text chunks only\nreturn result.toTextStreamResponse();\n\n\nNote: toDataStreamResponse() does NOT exist in AI SDK v5 (common misconception).\n\nworkers-ai-provider Version Compatibility\n\nIMPORTANT: workers-ai-provider@2.x requires AI SDK v5, NOT v4.\n\n# ✅ Correct - AI SDK v5 with workers-ai-provider v2\nnpm install ai@^5.0.0 workers-ai-provider@^2.0.0 zod@^3.25.0\n\n# ❌ Wrong - AI SDK v4 causes error\nnpm install ai@^4.0.0 workers-ai-provider@^2.0.0\n# Error: \"AI SDK 4 only supports models that implement specification version v1\"\n\n\nZod Version: AI SDK v5 requires zod@^3.25.0 or later for zod/v3 and zod/v4 exports. 
Older versions (3.22.x) cause build errors: \"Could not resolve zod/v4\".\n\nCloudflare Workers Startup Fix\n\nProblem: AI SDK v5 + Zod causes >270ms startup time (exceeds Workers 400ms limit).\n\nSolution:\n\n// ❌ BAD: Top-level imports cause startup overhead\nimport { createWorkersAI } from 'workers-ai-provider';\nconst workersai = createWorkersAI({ binding: env.AI });\n\n// ✅ GOOD: Lazy initialization inside handler\napp.post('/chat', async (c) => {\n  const { createWorkersAI } = await import('workers-ai-provider');\n  const workersai = createWorkersAI({ binding: c.env.AI });\n  // ...\n});\n\n\nAdditional:\n\nMinimize top-level Zod schemas\nMove complex schemas into route handlers\nMonitor startup time with Wrangler\nv5 Tool Calling Changes\n\nBreaking Changes:\n\nparameters → inputSchema (Zod schema)\nTool properties: args → input, result → output\nToolExecutionError removed (now tool-error content parts)\nmaxSteps parameter removed → Use stopWhen(stepCountIs(n))\n\nNew in v5:\n\nDynamic tools (add tools at runtime based on context)\nAgent class (multi-step execution with tools)\nCritical v4→v5 Migration\n\nAI SDK v5 introduced extensive breaking changes. 
If migrating from v4, follow this guide.\n\nBreaking Changes Overview\n\nParameter Renames\n\nmaxTokens → maxOutputTokens\nproviderMetadata → providerOptions\n\nTool Definitions\n\nparameters → inputSchema\nTool properties: args → input, result → output\n\nMessage Types\n\nCoreMessage → ModelMessage\nMessage → UIMessage\nconvertToCoreMessages → convertToModelMessages\n\nTool Error Handling\n\nToolExecutionError class removed\nNow tool-error content parts\nEnables automated retry\n\nMulti-Step Execution\n\nmaxSteps → stopWhen\nUse stepCountIs() or hasToolCall()\n\nMessage Structure\n\nSimple content string → parts array\nParts: text, file, reasoning, tool-call, tool-result\n\nStreaming Architecture\n\nSingle chunk → start/delta/end lifecycle\nUnique IDs for concurrent streams\n\nTool Streaming\n\nEnabled by default\ntoolCallStreaming option removed\n\nPackage Reorganization\n\nai/rsc → @ai-sdk/rsc\nai/react → @ai-sdk/react\nLangChainAdapter → @ai-sdk/langchain\nMigration Examples\n\nBefore (v4):\n\nimport { generateText } from 'ai';\n\nconst result = await generateText({\n  model: openai.chat('gpt-4-turbo'),\n  maxTokens: 500,\n  providerMetadata: { openai: { user: 'user-123' } },\n  tools: {\n    weather: {\n      description: 'Get weather',\n      parameters: z.object({ location: z.string() }),\n      execute: async (args) => { /* args.location */ },\n    },\n  },\n  maxSteps: 5,\n});\n\n\nAfter (v5):\n\nimport { generateText, tool, stepCountIs } from 'ai';\n\nconst result = await generateText({\n  model: openai('gpt-4-turbo'),\n  maxOutputTokens: 500,\n  providerOptions: { openai: { user: 'user-123' } },\n  tools: {\n    weather: tool({\n      description: 'Get weather',\n      inputSchema: z.object({ location: z.string() }),\n      execute: async ({ location }) => { /* location */ },\n    }),\n  },\n  stopWhen: stepCountIs(5),\n});\n\nMigration Checklist\n Update all maxTokens to maxOutputTokens\n Update providerMetadata to providerOptions\n 
Convert tool parameters to inputSchema\n Update tool execute functions: args → input\n Replace maxSteps with stopWhen(stepCountIs(n))\n Update message types: CoreMessage → ModelMessage\n Remove ToolExecutionError handling\n Update package imports (ai/rsc → @ai-sdk/rsc)\n Test streaming behavior (architecture changed)\n Update TypeScript types\nAutomated Migration\n\nAI SDK provides a migration tool:\n\nnpx ai migrate\n\n\nThis will update most breaking changes automatically. Review changes carefully.\n\nOfficial Migration Guide: https://ai-sdk.dev/docs/migration-guides/migration-guide-5-0\n\nTop 15 Errors & Solutions\n1. AI_APICallError\n\nCause: API request failed (network, auth, rate limit).\n\nSolution:\n\nimport { AI_APICallError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n  });\n} catch (error) {\n  if (error instanceof AI_APICallError) {\n    console.error('API call failed:', error.message);\n    console.error('Status code:', error.statusCode);\n    console.error('Response:', error.responseBody);\n\n    // Check common causes\n    if (error.statusCode === 401) {\n      // Invalid API key\n    } else if (error.statusCode === 429) {\n      // Rate limit - implement backoff\n    } else if (error.statusCode >= 500) {\n      // Provider issue - retry\n    }\n  }\n}\n\n\nPrevention:\n\nValidate API keys at startup\nImplement retry logic with exponential backoff\nMonitor rate limits\nHandle network errors gracefully\n2. AI_NoObjectGeneratedError\n\nCause: Model didn't generate valid object matching schema.\n\nSolution:\n\nimport { AI_NoObjectGeneratedError } from 'ai';\n\ntry {\n  const result = await generateObject({\n    model: openai('gpt-4-turbo'),\n    schema: z.object({ /* complex schema */ }),\n    prompt: 'Generate data',\n  });\n} catch (error) {\n  if (error instanceof AI_NoObjectGeneratedError) {\n    console.error('No valid object generated');\n\n    // Solutions:\n    // 1. 
Simplify schema\n    // 2. Add more context to prompt\n    // 3. Provide examples in prompt\n    // 4. Try different model (gpt-5 or claude-sonnet-4-5 for complex objects)\n  }\n}\n\n\nPrevention:\n\nStart with simple schemas, add complexity incrementally\nInclude examples in prompt: \"Generate a person like: { name: 'Alice', age: 30 }\"\nUse GPT-4 for complex structured output\nTest schemas with sample data first\n3. Worker Startup Limit (270ms+)\n\nCause: AI SDK v5 + Zod initialization overhead in Cloudflare Workers exceeds startup limits.\n\nSolution:\n\n// BAD: Top-level imports cause startup overhead\nimport { createWorkersAI } from 'workers-ai-provider';\nimport { complexSchema } from './schemas';\n\nconst workersai = createWorkersAI({ binding: env.AI });\n\n// GOOD: Lazy initialization inside handler\nexport default {\n  async fetch(request, env) {\n    const { createWorkersAI } = await import('workers-ai-provider');\n    const workersai = createWorkersAI({ binding: env.AI });\n\n    // Use workersai here\n  }\n}\n\n\nPrevention:\n\nMove AI SDK imports inside route handlers\nMinimize top-level Zod schemas\nMonitor Worker startup time (must be <400ms)\nUse Wrangler's startup time reporting\n\nGitHub Issue: Search for \"Workers startup limit\" in Vercel AI SDK issues\n\n4. 
streamText Fails Silently\n\nCause: Stream errors can be swallowed by createDataStreamResponse.\n\nStatus: ✅ RESOLVED - Fixed in ai@4.1.22 (February 2025)\n\nSolution (Recommended):\n\n// Use the onError callback (added in v4.1.22)\nconst stream = streamText({\n  model: openai('gpt-4-turbo'),\n  prompt: 'Hello',\n  onError({ error }) {\n    console.error('Stream error:', error);\n    // Custom error logging and handling\n  },\n});\n\n// Stream safely\nfor await (const chunk of stream.textStream) {\n  process.stdout.write(chunk);\n}\n\n\nAlternative (Manual try-catch):\n\n// Fallback if not using onError callback\ntry {\n  const stream = streamText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n  });\n\n  for await (const chunk of stream.textStream) {\n    process.stdout.write(chunk);\n  }\n} catch (error) {\n  console.error('Stream error:', error);\n}\n\n\nPrevention:\n\nUse onError callback for proper error capture (recommended)\nImplement server-side error monitoring\nTest stream error handling explicitly\nAlways log on server side in production\n\nGitHub Issue: #4726 (RESOLVED)\n\n5. AI_LoadAPIKeyError\n\nCause: Missing or invalid API key.\n\nSolution:\n\nimport { AI_LoadAPIKeyError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n  });\n} catch (error) {\n  if (error instanceof AI_LoadAPIKeyError) {\n    console.error('API key error:', error.message);\n\n    // Check:\n    // 1. .env file exists and loaded\n    // 2. Correct env variable name (OPENAI_API_KEY)\n    // 3. Key format is valid (starts with sk-)\n  }\n}\n\n\nPrevention:\n\nValidate API keys at application startup\nUse environment variable validation (e.g., zod)\nProvide clear error messages in development\nDocument required environment variables\n6. 
AI_InvalidArgumentError\n\nCause: Invalid parameters passed to function.\n\nSolution:\n\nimport { AI_InvalidArgumentError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    maxOutputTokens: -1,  // Invalid!\n    prompt: 'Hello',\n  });\n} catch (error) {\n  if (error instanceof AI_InvalidArgumentError) {\n    console.error('Invalid argument:', error.message);\n    // Check parameter types and values\n  }\n}\n\n\nPrevention:\n\nUse TypeScript for type checking\nValidate inputs before calling AI SDK functions\nRead function signatures carefully\nCheck official docs for parameter constraints\n7. AI_NoContentGeneratedError\n\nCause: Model generated no content (safety filters, etc.).\n\nSolution:\n\nimport { AI_NoContentGeneratedError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Some prompt',\n  });\n} catch (error) {\n  if (error instanceof AI_NoContentGeneratedError) {\n    console.error('No content generated');\n\n    // Possible causes:\n    // 1. Safety filters blocked output\n    // 2. Prompt triggered content policy\n    // 3. Model configuration issue\n\n    // Handle gracefully:\n    return { text: 'Unable to generate response. Please try different input.' };\n  }\n}\n\n\nPrevention:\n\nSanitize user inputs\nAvoid prompts that may trigger safety filters\nHave fallback messaging\nLog occurrences for analysis\n8. AI_TypeValidationError\n\nCause: Zod schema validation failed on generated output.\n\nSolution:\n\nimport { AI_TypeValidationError } from 'ai';\n\ntry {\n  const result = await generateObject({\n    model: openai('gpt-4-turbo'),\n    schema: z.object({\n      age: z.number().min(0).max(120),  // Strict validation\n    }),\n    prompt: 'Generate person',\n  });\n} catch (error) {\n  if (error instanceof AI_TypeValidationError) {\n    console.error('Validation failed:', error.message);\n\n    // Solutions:\n    // 1. 
Relax schema constraints\n    // 2. Add more guidance in prompt\n    // 3. Use .optional() for unreliable fields\n  }\n}\n\n\nPrevention:\n\nStart with lenient schemas, tighten gradually\nUse .optional() for fields that may not always be present\nAdd validation hints in field descriptions\nTest with various prompts\n9. AI_RetryError\n\nCause: All retry attempts failed.\n\nSolution:\n\nimport { AI_RetryError } from 'ai';\n\ntry {\n  const result = await generateText({\n    model: openai('gpt-4-turbo'),\n    prompt: 'Hello',\n    maxRetries: 3,  // Default is 2\n  });\n} catch (error) {\n  if (error instanceof AI_RetryError) {\n    console.error('All retries failed');\n    console.error('Last error:', error.lastError);\n\n    // Check root cause:\n    // - Persistent network issue\n    // - Provider outage\n    // - Invalid configuration\n  }\n}\n\n\nPrevention:\n\nInvestigate root cause of failures\nAdjust retry configuration if needed\nImplement circuit breaker pattern for provider outages\nHave fallback providers\n10. Rate Limiting Errors\n\nCause: Exceeded provider rate limits (RPM/TPM).\n\nSolution:\n\n// Implement exponential backoff\nasync function generateWithBackoff(prompt: string, retries = 3) {\n  for (let i = 0; i < retries; i++) {\n    try {\n      return await generateText({\n        model: openai('gpt-4-turbo'),\n        prompt,\n      });\n    } catch (error) {\n      if (error instanceof AI_APICallError && error.statusCode === 429) {\n        const delay = Math.pow(2, i) * 1000;  // Exponential backoff\n        console.log(`Rate limited, waiting ${delay}ms`);\n        await new Promise(resolve => setTimeout(resolve, delay));\n      } else {\n        throw error;\n      }\n    }\n  }\n  throw new Error('Rate limit retries exhausted');\n}\n\n\nPrevention:\n\nMonitor rate limit headers\nQueue requests to stay under limits\nUpgrade provider tier if needed\nImplement request throttling\n11. 
TypeScript Performance with Zod\n\nCause: Complex Zod schemas slow down TypeScript type checking.\n\nSolution:\n\n// Instead of deeply nested schemas at top level:\n// const complexSchema = z.object({ /* 100+ fields */ });\n\n// Define inside functions or use type assertions:\nfunction generateData() {\n  const schema = z.object({ /* complex schema */ });\n  return generateObject({ model: openai('gpt-4-turbo'), schema, prompt: '...' });\n}\n\n// Or use z.lazy() for recursive schemas:\ntype Category = { name: string; subcategories?: Category[] };\nconst CategorySchema: z.ZodType<Category> = z.lazy(() =>\n  z.object({\n    name: z.string(),\n    subcategories: z.array(CategorySchema).optional(),\n  })\n);\n\n\nPrevention:\n\nAvoid top-level complex schemas\nUse z.lazy() for recursive types\nSplit large schemas into smaller ones\nUse type assertions where appropriate\n\nOfficial Docs: https://ai-sdk.dev/docs/troubleshooting/common-issues/slow-type-checking\n\n12. Invalid JSON Response (Provider-Specific)\n\nCause: Some models occasionally return invalid JSON.\n\nSolution:\n\n// Use built-in retry and mode selection\nconst result = await generateObject({\n  model: openai('gpt-4-turbo'),\n  schema: mySchema,\n  prompt: 'Generate data',\n  mode: 'json',  // Force JSON mode (supported by GPT-4)\n  maxRetries: 3,  // Retry on invalid JSON\n});\n\n// Or catch and retry manually:\ntry {\n  const result = await generateObject({\n    model: openai('gpt-4-turbo'),\n    schema: mySchema,\n    prompt: 'Generate data',\n  });\n} catch (error) {\n  // Retry with different model\n  const result = await generateObject({\n    model: openai('gpt-4o'),\n    schema: mySchema,\n    prompt: 'Generate data',\n  });\n}\n\n\nPrevention:\n\nUse mode: 'json' when available\nPrefer GPT-4 for structured output\nImplement retry logic\nValidate responses\n\nGitHub Issue: #4302 (Imagen 3.0 Invalid JSON)\n\n13. 
Gemini Implicit Caching Fails with Tools\n\nError: No error, but higher API costs due to disabled caching. Cause: Google Gemini 3 Flash's cost-saving implicit caching doesn't work when any tools are defined, even if never used. Source: GitHub Issue #11513\n\nWhy It Happens: Gemini API disables caching when tools are present in the request, regardless of whether they're invoked.\n\nPrevention:\n\n// Conditionally add tools only when needed\nconst needsTools = await analyzePrompt(userInput);\n\nconst result = await generateText({\n  model: google('gemini-3-flash'),\n  tools: needsTools ? { weather: weatherTool } : undefined,\n  prompt: userInput,\n});\n\n\nImpact: High - Can significantly increase API costs for repeated context\n\n14. Anthropic Tool Error Results Cause JSON Parse Crash\n\nError: SyntaxError: \"[object Object]\" is not valid JSON. Cause: Anthropic provider built-in tools (web_fetch, etc.) return error objects that SDK tries to JSON.parse. Source: GitHub Issue #11856\n\nWhy It Happens: When Anthropic built-in tools fail (e.g., url_not_allowed), they return error objects. AI SDK incorrectly tries to parse these as JSON strings.\n\nPrevention:\n\ntry {\n  const result = await generateText({\n    model: anthropic('claude-sonnet-4-5-20250929'),\n    tools: { web_fetch: { type: 'anthropic_defined', name: 'web_fetch' } },\n    prompt: userPrompt,\n  });\n} catch (error) {\n  if (error.message.includes('is not valid JSON')) {\n    // Tool returned error result, handle gracefully\n    console.error('Tool execution failed - likely blocked URL or permission issue');\n    // Retry without tool or use custom tool\n  }\n  throw error;\n}\n\n\nImpact: High - Production crashes when using Anthropic built-in tools\n\n15. 
Tool-Result in Assistant Message (Anthropic)\n\nError: Anthropic API error - tool-result in assistant message not allowed. Cause: Server-executed tools incorrectly place tool-result parts in assistant messages. Source: GitHub Issue #11855\n\nWhy It Happens: When using server-executed tools (tools where execute runs on server, not sent to model), the AI SDK incorrectly includes tool-result parts in the assistant message. Anthropic expects tool-result only in user messages.\n\nPrevention:\n\n// Workaround: Filter messages before sending\nconst filteredMessages = messages.map(msg => {\n  if (msg.role === 'assistant') {\n    return {\n      ...msg,\n      content: msg.content.filter(part => part.type !== 'tool-result'),\n    };\n  }\n  return msg;\n});\n\nconst result = await generateText({\n  model: anthropic('claude-sonnet-4-5-20250929'),\n  tools: { database: databaseTool },\n  messages: filteredMessages,\n  prompt: 'Get user data',\n});\n\n\nImpact: High - Breaks server-executed tool pattern with Anthropic provider\n\nStatus: Known issue, PR #11854 submitted\n\nMore Errors: https://ai-sdk.dev/docs/reference/ai-sdk-errors (31 total)\n\nKnown Issues & Limitations\nuseChat Stale Closures with Memoized Options\n\nIssue: When using useChat with memoized options (common for performance), the onData and onFinish callbacks have stale closures and don't see updated state variables.\n\nSource: GitHub Issue #11686\n\nReproduction:\n\nconst [count, setCount] = useState(0);\n\nconst chatOptions = useMemo(() => ({\n  onFinish: (message) => {\n    console.log('Count:', count); // ALWAYS 0, never updates!\n  },\n}), []); // Empty deps = stale closure\n\nconst { messages, append } = useChat(chatOptions);\n\n\nWorkaround 1 - Don't Memoize Callbacks:\n\nconst { messages, append } = useChat({\n  onFinish: (message) => {\n    console.log('Count:', count); // Now sees current count\n  },\n});\n\n\nWorkaround 2 - Use useRef:\n\nconst countRef = useRef(count);\nuseEffect(() => { 
countRef.current = count; }, [count]);\n\nconst chatOptions = useMemo(() => ({\n  onFinish: (message) => {\n    console.log('Count:', countRef.current); // Always current\n  },\n}), []);\n\n\nFull Repro: https://github.com/alechoey/ai-sdk-stale-ondata-repro\n\nStream Resumption Fails on Tab Switch\n\nIssue: When users switch browser tabs or background the app during an AI stream, the stream does not resume when they return. The connection is lost and does not automatically reconnect.\n\nSource: GitHub Issue #11865\n\nImpact: High - Major UX issue for long-running streams\n\nWorkaround 1 - Implement onError Handler:\n\nconst { messages, append, reload } = useChat({\n  api: '/api/chat',\n  onError: (error) => {\n    if (error.message.includes('stream') || error.message.includes('aborted')) {\n      // Attempt to reload last message\n      reload();\n    }\n  },\n});\n\n\nWorkaround 2 - Detect Visibility Change:\n\nuseEffect(() => {\n  const handleVisibilityChange = () => {\n    if (document.visibilityState === 'visible') {\n      // Check if stream was interrupted\n      const lastMessage = messages[messages.length - 1];\n      if (lastMessage?.role === 'assistant' && !lastMessage.content) {\n        reload();\n      }\n    }\n  };\n\n  document.addEventListener('visibilitychange', handleVisibilityChange);\n  return () => document.removeEventListener('visibilitychange', handleVisibilityChange);\n}, [messages, reload]);\n\n\nStatus: Known limitation, no auto-reconnection built-in\n\nWhen to Use This Skill\nUse ai-sdk-core when:\nBuilding backend AI features (server-side text generation)\nImplementing server-side text generation (Node.js, Workers, Next.js)\nCreating structured AI outputs (JSON, forms, data extraction)\nBuilding AI agents with tools (multi-step workflows)\nIntegrating multiple AI providers (OpenAI, Anthropic, Google, Cloudflare)\nMigrating from AI SDK v4 to v5\nEncountering AI SDK errors (AI_APICallError, AI_NoObjectGeneratedError, etc.)\nUsing AI in 
Cloudflare Workers (with workers-ai-provider)\nUsing AI in Next.js Server Components/Actions\nNeed consistent API across different LLM providers\nDon't use this skill when:\nBuilding React chat UIs (use ai-sdk-ui skill instead)\nNeed frontend hooks like useChat (use ai-sdk-ui skill instead)\nNeed advanced topics like embeddings or image generation (check official docs)\nBuilding native Cloudflare Workers AI apps without multi-provider (use cloudflare-workers-ai skill instead)\nNeed Generative UI / RSC (see https://ai-sdk.dev/docs/ai-sdk-rsc)\nVersions\n\nAI SDK:\n\nStable: ai@6.0.26 (Jan 2026)\n⚠️ Skip v6.0.40 - Breaking streaming change (reverted in v6.0.41)\nLegacy v5: ai@5.0.117 (ai-v5 tag)\nZod 3.x/4.x both supported\n\nLatest Models (2026):\n\nOpenAI: GPT-5.2, GPT-5.1, GPT-5, o3, o3-mini, o4-mini\nAnthropic: Claude Sonnet 4.5, Opus 4.1, Haiku 4.5\nGoogle: Gemini 2.5 Pro/Flash/Lite\n\nCheck Latest:\n\nnpm view ai version\nnpm view ai dist-tags\n\nOfficial Docs\n\nCore:\n\nAI SDK v6: https://ai-sdk.dev/docs\nAI SDK Core: https://ai-sdk.dev/docs/ai-sdk-core/overview\nOutput API: https://ai-sdk.dev/docs/ai-sdk-core/generating-structured-data\nv4→v5 Migration: https://ai-sdk.dev/docs/migration-guides/migration-guide-5-0\nAll Errors (31): https://ai-sdk.dev/docs/reference/ai-sdk-errors\nProviders (69+): https://ai-sdk.dev/providers/overview\n\nMulti-Modal:\n\nSpeech: https://ai-sdk.dev/docs/ai-sdk-core/speech\nTranscription: https://ai-sdk.dev/docs/ai-sdk-core/transcription\nImage Generation: https://ai-sdk.dev/docs/ai-sdk-core/image-generation\nEmbeddings: https://ai-sdk.dev/docs/ai-sdk-core/embeddings\n\nGitHub:\n\nRepository: https://github.com/vercel/ai\nIssues: https://github.com/vercel/ai/issues\n\nLast Updated: 2026-01-20 Skill Version: 2.1.0 Changes: Added 3 new errors (Gemini caching, Anthropic tool errors, tool-result placement), MCP security guidance, tool approval best practices, React hooks edge cases, stream resumption workarounds AI SDK: 6.0.26 stable 
(avoid v6.0.40)"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/Veeramanikandanr48/ai-sdk-core",
    "publisherUrl": "https://clawhub.ai/Veeramanikandanr48/ai-sdk-core",
    "owner": "Veeramanikandanr48",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/ai-sdk-core",
    "downloadUrl": "https://openagent3.xyz/downloads/ai-sdk-core",
    "agentUrl": "https://openagent3.xyz/skills/ai-sdk-core/agent",
    "manifestUrl": "https://openagent3.xyz/skills/ai-sdk-core/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/ai-sdk-core/agent.md"
  }
}