{
  "schemaVersion": "1.0",
  "item": {
    "slug": "context-engineering",
    "name": "Agent-Skills-for-Context-Engineering",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/leoyessi10-tech/context-engineering",
    "canonicalUrl": "https://clawhub.ai/leoyessi10-tech/context-engineering",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/context-engineering",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=context-engineering",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "scripts/compression_evaluator.py",
      "references/evaluation-framework.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
        "contentDisposition": "attachment; filename=\"4claw-imageboard-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/context-engineering"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/context-engineering",
    "agentPageUrl": "https://openagent3.xyz/skills/context-engineering/agent",
    "manifestUrl": "https://openagent3.xyz/skills/context-engineering/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/context-engineering/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Context Compression Strategies",
        "body": "When agent sessions generate millions of tokens of conversation history, compression becomes mandatory. The naive approach is aggressive compression to minimize tokens per request. The correct optimization target is tokens per task: total tokens consumed to complete a task, including re-fetching costs when compression loses critical information."
      },
      {
        "title": "When to Activate",
        "body": "Activate this skill when:\n\nAgent sessions exceed context window limits\nCodebases exceed context windows (5M+ token systems)\nDesigning conversation summarization strategies\nDebugging cases where agents \"forget\" what files they modified\nBuilding evaluation frameworks for compression quality"
      },
      {
        "title": "Core Concepts",
        "body": "Context compression trades token savings against information loss. Three production-ready approaches exist:\n\nAnchored Iterative Summarization: Maintain structured, persistent summaries with explicit sections for session intent, file modifications, decisions, and next steps. When compression triggers, summarize only the newly-truncated span and merge with the existing summary. Structure forces preservation by dedicating sections to specific information types.\n\n\nOpaque Compression: Produce compressed representations optimized for reconstruction fidelity. Achieves highest compression ratios (99%+) but sacrifices interpretability. Cannot verify what was preserved.\n\n\nRegenerative Full Summary: Generate detailed structured summaries on each compression. Produces readable output but may lose details across repeated compression cycles due to full regeneration rather than incremental merging.\n\nThe critical insight: structure forces preservation. Dedicated sections act as checklists that the summarizer must populate, preventing silent information drift."
      },
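      {
        "title": "Sketch: Structured Summary as a Checklist (illustrative)",
        "body": "A minimal sketch of the \"structure forces preservation\" idea, assuming the section names from this skill's template; the class and function names are hypothetical and not part of this package.\n\n```python\n# Minimal sketch: a structured summary whose required sections act as a checklist.\n# Section names follow this skill's template; everything else is illustrative.\nfrom dataclasses import dataclass, field\n\nREQUIRED_SECTIONS = [\n    \"Session Intent\",\n    \"Files Modified\",\n    \"Decisions Made\",\n    \"Current State\",\n    \"Next Steps\",\n]\n\n@dataclass\nclass StructuredSummary:\n    sections: dict = field(default_factory=dict)\n\n    def missing_sections(self) -> list:\n        # An empty required section is a signal the summarizer silently dropped information.\n        return [name for name in REQUIRED_SECTIONS if not self.sections.get(name, \"\").strip()]\n\n    def render(self) -> str:\n        return \"\\n\\n\".join(f\"## {name}\\n{self.sections.get(name, '')}\" for name in REQUIRED_SECTIONS)\n```\n\nThe checklist check is the point: an empty section is caught before the compressed summary replaces the original history."
      },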
      {
        "title": "Why Tokens-Per-Task Matters",
        "body": "Traditional compression metrics target tokens-per-request. This is the wrong optimization. When compression loses critical details like file paths or error messages, the agent must re-fetch information, re-explore approaches, and waste tokens recovering context.\n\nThe right metric is tokens-per-task: total tokens consumed from task start to completion. A compression strategy saving 0.5% more tokens but causing 20% more re-fetching costs more overall."
      },
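      {
        "title": "Sketch: Estimating Tokens-Per-Task (illustrative)",
        "body": "A back-of-envelope illustration of the tokens-per-task argument above. All numbers are hypothetical placeholders, not measurements from this package.\n\n```python\n# Hypothetical comparison: the strategy with the better compression ratio can still lose\n# on tokens-per-task once re-fetching is counted. Every number below is a placeholder.\ndef tokens_per_task(context_tokens: int, compression_ratio: float,\n                    requests: int, refetch_tokens: int) -> int:\n    compressed = round(context_tokens * (1 - compression_ratio))\n    return requests * compressed + refetch_tokens\n\naggressive = tokens_per_task(100_000, 0.993, requests=20, refetch_tokens=30_000)\nstructured = tokens_per_task(100_000, 0.986, requests=20, refetch_tokens=5_000)\nprint(aggressive, structured)  # 44000 vs 33000 in this toy setup: less compression, cheaper task\n```"
      },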
      {
        "title": "The Artifact Trail Problem",
        "body": "Artifact trail integrity is the weakest dimension across all compression methods, scoring 2.2-2.5 out of 5.0 in evaluations. Even structured summarization with explicit file sections struggles to maintain complete file tracking across long sessions.\n\nCoding agents need to know:\n\nWhich files were created\nWhich files were modified and what changed\nWhich files were read but not changed\nFunction names, variable names, error messages\n\nThis problem likely requires specialized handling beyond general summarization: a separate artifact index or explicit file-state tracking in agent scaffolding."
      },
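      {
        "title": "Sketch: A Separate Artifact Index (illustrative)",
        "body": "A sketch of the \"separate artifact index\" suggested above: file state is tracked outside the prose summary so it survives compression. The states and API are assumptions for illustration, not part of this skill's scripts.\n\n```python\n# Hypothetical artifact index kept alongside the summary, so file state survives\n# compression even when prose details are dropped.\nfrom enum import Enum\n\nclass FileState(Enum):\n    CREATED = \"created\"\n    MODIFIED = \"modified\"\n    READ_ONLY = \"read\"  # read but not changed\n\nclass ArtifactIndex:\n    def __init__(self) -> None:\n        self._files: dict[str, tuple[FileState, str]] = {}\n\n    def record(self, path: str, state: FileState, note: str = \"\") -> None:\n        # Keep the strongest state seen: read-only entries are upgraded,\n        # created/modified entries are never downgraded back to read-only.\n        current = self._files.get(path)\n        if current is None or current[0] is FileState.READ_ONLY:\n            self._files[path] = (state, note)\n\n    def render(self) -> str:\n        lines = [f\"- {path}: {state.value} {note}\".rstrip()\n                 for path, (state, note) in sorted(self._files.items())]\n        return \"## Artifact Trail\\n\" + \"\\n\".join(lines)\n```"
      },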
      {
        "title": "Structured Summary Sections",
        "body": "Effective structured summaries include explicit sections:\n\n## Session Intent\n[What the user is trying to accomplish]\n\n## Files Modified\n- auth.controller.ts: Fixed JWT token generation\n- config/redis.ts: Updated connection pooling\n- tests/auth.test.ts: Added mock setup for new config\n\n## Decisions Made\n- Using Redis connection pool instead of per-request connections\n- Retry logic with exponential backoff for transient failures\n\n## Current State\n- 14 tests passing, 2 failing\n- Remaining: mock setup for session service tests\n\n## Next Steps\n1. Fix remaining test failures\n2. Run full test suite\n3. Update documentation\n\nThis structure prevents silent loss of file paths or decisions because each section must be explicitly addressed."
      },
      {
        "title": "Compression Trigger Strategies",
        "body": "When to trigger compression matters as much as how to compress:\n\nStrategyTrigger PointTrade-offFixed threshold70-80% context utilizationSimple but may compress too earlySliding windowKeep last N turns + summaryPredictable context sizeImportance-basedCompress low-relevance sections firstComplex but preserves signalTask-boundaryCompress at logical task completionsClean summaries but unpredictable timing\n\nThe sliding window approach with structured summaries provides the best balance of predictability and quality for most coding agent use cases."
      },
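      {
        "title": "Sketch: Compression Triggers (illustrative)",
        "body": "A sketch of the fixed-threshold and sliding-window triggers from the table above. Token accounting and turn structure are simplified assumptions; real counts depend on your model's tokenizer.\n\n```python\n# Illustrative triggers only; these are not shipped by this skill.\ndef should_compress_fixed(used_tokens: int, window_tokens: int, threshold: float = 0.75) -> bool:\n    # Fixed threshold: compress once utilization crosses ~70-80% of the context window.\n    return used_tokens / window_tokens >= threshold\n\ndef sliding_window(turns: list[str], summary: str, keep_last: int = 20) -> list[str]:\n    # Sliding window: keep the structured summary plus the most recent N turns.\n    if len(turns) <= keep_last:\n        return turns\n    return [summary] + turns[-keep_last:]\n```"
      },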
      {
        "title": "Probe-Based Evaluation",
        "body": "Traditional metrics like ROUGE or embedding similarity fail to capture functional compression quality. A summary may score high on lexical overlap while missing the one file path the agent needs.\n\nProbe-based evaluation directly measures functional quality by asking questions after compression:\n\nProbe TypeWhat It TestsExample QuestionRecallFactual retention\"What was the original error message?\"ArtifactFile tracking\"Which files have we modified?\"ContinuationTask planning\"What should we do next?\"DecisionReasoning chain\"What did we decide about the Redis issue?\"\n\nIf compression preserved the right information, the agent answers correctly. If not, it guesses or hallucinates."
      },
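      {
        "title": "Sketch: A Probe Harness (illustrative)",
        "body": "A sketch of a probe harness in the spirit of the table above. The skill ships scripts/compression_evaluator.py; this standalone snippet is not that script, and ask_agent / grade are placeholders for your own agent call and scoring rubric.\n\n```python\n# Hypothetical probe harness: ask questions after compression and grade the answers.\nfrom typing import Callable\n\nPROBES = [\n    (\"recall\", \"What was the original error message?\"),\n    (\"artifact\", \"Which files have we modified?\"),\n    (\"continuation\", \"What should we do next?\"),\n    (\"decision\", \"What did we decide about the Redis issue?\"),\n]\n\ndef run_probes(compressed_context: str,\n               ask_agent: Callable[[str, str], str],\n               grade: Callable[[str, str], float]) -> dict[str, float]:\n    scores = {}\n    for probe_type, question in PROBES:\n        answer = ask_agent(compressed_context, question)\n        scores[probe_type] = grade(question, answer)  # e.g. a 1-5 rubric or LLM-as-judge\n    return scores\n```"
      },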
      {
        "title": "Evaluation Dimensions",
        "body": "Six dimensions capture compression quality for coding agents:\n\nAccuracy: Are technical details correct? File paths, function names, error codes.\nContext Awareness: Does the response reflect current conversation state?\nArtifact Trail: Does the agent know which files were read or modified?\nCompleteness: Does the response address all parts of the question?\nContinuity: Can work continue without re-fetching information?\nInstruction Following: Does the response respect stated constraints?\n\nAccuracy shows the largest variation between compression methods (0.6 point gap). Artifact trail is universally weak (2.2-2.5 range)."
      },
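      {
        "title": "Sketch: Scoring the Six Dimensions (illustrative)",
        "body": "A sketch of rolling the six dimensions up into a single 1-5 quality score, matching the ranges quoted above. Equal weighting and the dimension keys are assumptions.\n\n```python\n# Illustrative equal-weight rollup over the six dimensions; how each per-dimension\n# score is produced (human review or LLM-as-judge) is up to your evaluation setup.\nDIMENSIONS = [\n    \"accuracy\",\n    \"context_awareness\",\n    \"artifact_trail\",\n    \"completeness\",\n    \"continuity\",\n    \"instruction_following\",\n]\n\ndef overall_quality(scores: dict[str, float]) -> float:\n    missing = [d for d in DIMENSIONS if d not in scores]\n    if missing:\n        raise ValueError(f\"missing dimension scores: {missing}\")\n    return sum(scores[d] for d in DIMENSIONS) / len(DIMENSIONS)\n```"
      },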
      {
        "title": "Three-Phase Compression Workflow",
        "body": "For large codebases or agent systems exceeding context windows, apply compression through three phases:\n\nResearch Phase: Produce a research document from architecture diagrams, documentation, and key interfaces. Compress exploration into a structured analysis of components and dependencies. Output: single research document.\n\n\nPlanning Phase: Convert research into implementation specification with function signatures, type definitions, and data flow. A 5M token codebase compresses to approximately 2,000 words of specification.\n\n\nImplementation Phase: Execute against the specification. Context remains focused on the spec rather than raw codebase exploration."
      },
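      {
        "title": "Sketch: Three-Phase Pipeline (illustrative)",
        "body": "A structural sketch of the three-phase workflow above. The phase functions are stubs standing in for whatever prompts or tools produce each document; only the data flow is the point.\n\n```python\n# Hypothetical pipeline: each phase consumes the previous phase's output document,\n# so implementation never needs the raw multi-million-token codebase in context.\ndef research_phase(architecture_docs: list[str], key_interfaces: list[str]) -> str:\n    \"\"\"Compress exploration into a single structured research document.\"\"\"\n    ...\n\ndef planning_phase(research_doc: str) -> str:\n    \"\"\"Turn research into a compact implementation spec (signatures, types, data flow).\"\"\"\n    ...\n\ndef implementation_phase(spec: str) -> str:\n    \"\"\"Execute against the spec; context stays on ~2,000 words, not the codebase.\"\"\"\n    ...\n\ndef run(architecture_docs: list[str], key_interfaces: list[str]) -> str:\n    spec = planning_phase(research_phase(architecture_docs, key_interfaces))\n    return implementation_phase(spec)\n```"
      },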
      {
        "title": "Using Example Artifacts as Seeds",
        "body": "When provided with a manual migration example or reference PR, use it as a template to understand the target pattern. The example reveals constraints that static analysis cannot surface: which invariants must hold, which services break on changes, and what a clean migration looks like.\n\nThis is particularly important when the agent cannot distinguish essential complexity (business requirements) from accidental complexity (legacy workarounds). The example artifact encodes that distinction."
      },
      {
        "title": "Implementing Anchored Iterative Summarization",
        "body": "Define explicit summary sections matching your agent's needs\nOn first compression trigger, summarize truncated history into sections\nOn subsequent compressions, summarize only new truncated content\nMerge new summary into existing sections rather than regenerating\nTrack which information came from which compression cycle for debugging"
      },
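      {
        "title": "Sketch: Incremental Merge Loop (illustrative)",
        "body": "A minimal sketch of the incremental-merge loop described in the steps above. Section names reuse this skill's template; the summarize callable is a placeholder for your model call.\n\n```python\n# Sketch of anchored iterative summarization: only newly truncated turns are summarized,\n# then merged into the persistent per-section summary rather than regenerating it.\nfrom typing import Callable\n\nSECTIONS = [\"Session Intent\", \"Files Modified\", \"Decisions Made\", \"Current State\", \"Next Steps\"]\n\ndef compress_incrementally(anchor: dict[str, str],\n                           truncated_turns: list[str],\n                           summarize: Callable[[list[str], list[str]], dict[str, str]],\n                           cycle: int) -> dict[str, str]:\n    new = summarize(truncated_turns, SECTIONS)  # summarize only the newly truncated span\n    for name in SECTIONS:\n        addition = new.get(name, \"\").strip()\n        if addition:\n            tagged = f\"{addition} [cycle {cycle}]\"  # track which cycle added what, for debugging\n            anchor[name] = f\"{anchor.get(name, '').strip()}\\n{tagged}\".strip()\n    return anchor\n```"
      },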
      {
        "title": "When to Use Each Approach",
        "body": "Use anchored iterative summarization when:\n\nSessions are long-running (100+ messages)\nFile tracking matters (coding, debugging)\nYou need to verify what was preserved\n\nUse opaque compression when:\n\nMaximum token savings required\nSessions are relatively short\nRe-fetching costs are low\n\nUse regenerative summaries when:\n\nSummary interpretability is critical\nSessions have clear phase boundaries\nFull context review is acceptable on each compression"
      },
      {
        "title": "Compression Ratio Considerations",
        "body": "MethodCompression RatioQuality ScoreTrade-offAnchored Iterative98.6%3.70Best quality, slightly less compressionRegenerative98.7%3.44Good quality, moderate compressionOpaque99.3%3.35Best compression, quality loss\n\nThe 0.7% additional tokens retained by structured summarization buys 0.35 quality points. For any task where re-fetching costs matter, this trade-off favors structured approaches."
      },
      {
        "title": "Examples",
        "body": "Example 1: Debugging Session Compression\n\nOriginal context (89,000 tokens, 178 messages):\n\n401 error on /api/auth/login endpoint\nTraced through auth controller, middleware, session store\nFound stale Redis connection\nFixed connection pooling, added retry logic\n14 tests passing, 2 failing\n\nStructured summary after compression:\n\n## Session Intent\nDebug 401 Unauthorized error on /api/auth/login despite valid credentials.\n\n## Root Cause\nStale Redis connection in session store. JWT generated correctly but session could not be persisted.\n\n## Files Modified\n- auth.controller.ts: No changes (read only)\n- middleware/cors.ts: No changes (examined)\n- config/redis.ts: Fixed connection pooling configuration\n- services/session.service.ts: Added retry logic for transient failures\n- tests/auth.test.ts: Updated mock setup\n\n## Test Status\n14 passing, 2 failing (mock setup issues)\n\n## Next Steps\n1. Fix remaining test failures (mock session service)\n2. Run full test suite\n3. Deploy to staging\n\nExample 2: Probe Response Quality\n\nAfter compression, asking \"What was the original error?\":\n\nGood response (structured summarization):\n\n\"The original error was a 401 Unauthorized response from the /api/auth/login endpoint. Users received this error with valid credentials. Root cause was stale Redis connection in session store.\"\n\nPoor response (aggressive compression):\n\n\"We were debugging an authentication issue. The login was failing. We fixed some configuration problems.\"\n\nThe structured response preserves endpoint, error code, and root cause. The aggressive response loses all technical detail."
      },
      {
        "title": "Guidelines",
        "body": "Optimize for tokens-per-task, not tokens-per-request\nUse structured summaries with explicit sections for file tracking\nTrigger compression at 70-80% context utilization\nImplement incremental merging rather than full regeneration\nTest compression quality with probe-based evaluation\nTrack artifact trail separately if file tracking is critical\nAccept slightly lower compression ratios for better quality retention\nMonitor re-fetching frequency as a compression quality signal"
      },
      {
        "title": "Integration",
        "body": "This skill connects to several others in the collection:\n\ncontext-degradation - Compression is a mitigation strategy for degradation\ncontext-optimization - Compression is one optimization technique among many\nevaluation - Probe-based evaluation applies to compression testing\nmemory-systems - Compression relates to scratchpad and summary memory patterns"
      },
      {
        "title": "References",
        "body": "Internal reference:\n\nEvaluation Framework Reference - Detailed probe types and scoring rubrics\n\nRelated skills in this collection:\n\ncontext-degradation - Understanding what compression prevents\ncontext-optimization - Broader optimization strategies\nevaluation - Building evaluation frameworks\n\nExternal resources:\n\nFactory Research: Evaluating Context Compression for AI Agents (December 2025)\nResearch on LLM-as-judge evaluation methodology (Zheng et al., 2023)\nNetflix Engineering: \"The Infinite Software Crisis\" - Three-phase workflow and context compression at scale (AI Summit 2025)"
      },
      {
        "title": "Skill Metadata",
        "body": "Created: 2025-12-22\nLast Updated: 2025-12-26\nAuthor: Agent Skills for Context Engineering Contributors\nVersion: 1.1.0"
      }
    ],
    "body": "Context Compression Strategies\n\nWhen agent sessions generate millions of tokens of conversation history, compression becomes mandatory. The naive approach is aggressive compression to minimize tokens per request. The correct optimization target is tokens per task: total tokens consumed to complete a task, including re-fetching costs when compression loses critical information.\n\nWhen to Activate\n\nActivate this skill when:\n\nAgent sessions exceed context window limits\nCodebases exceed context windows (5M+ token systems)\nDesigning conversation summarization strategies\nDebugging cases where agents \"forget\" what files they modified\nBuilding evaluation frameworks for compression quality\nCore Concepts\n\nContext compression trades token savings against information loss. Three production-ready approaches exist:\n\nAnchored Iterative Summarization: Maintain structured, persistent summaries with explicit sections for session intent, file modifications, decisions, and next steps. When compression triggers, summarize only the newly-truncated span and merge with the existing summary. Structure forces preservation by dedicating sections to specific information types.\n\nOpaque Compression: Produce compressed representations optimized for reconstruction fidelity. Achieves highest compression ratios (99%+) but sacrifices interpretability. Cannot verify what was preserved.\n\nRegenerative Full Summary: Generate detailed structured summaries on each compression. Produces readable output but may lose details across repeated compression cycles due to full regeneration rather than incremental merging.\n\nThe critical insight: structure forces preservation. Dedicated sections act as checklists that the summarizer must populate, preventing silent information drift.\n\nDetailed Topics\nWhy Tokens-Per-Task Matters\n\nTraditional compression metrics target tokens-per-request. This is the wrong optimization. When compression loses critical details like file paths or error messages, the agent must re-fetch information, re-explore approaches, and waste tokens recovering context.\n\nThe right metric is tokens-per-task: total tokens consumed from task start to completion. A compression strategy saving 0.5% more tokens but causing 20% more re-fetching costs more overall.\n\nThe Artifact Trail Problem\n\nArtifact trail integrity is the weakest dimension across all compression methods, scoring 2.2-2.5 out of 5.0 in evaluations. Even structured summarization with explicit file sections struggles to maintain complete file tracking across long sessions.\n\nCoding agents need to know:\n\nWhich files were created\nWhich files were modified and what changed\nWhich files were read but not changed\nFunction names, variable names, error messages\n\nThis problem likely requires specialized handling beyond general summarization: a separate artifact index or explicit file-state tracking in agent scaffolding.\n\nStructured Summary Sections\n\nEffective structured summaries include explicit sections:\n\n## Session Intent\n[What the user is trying to accomplish]\n\n## Files Modified\n- auth.controller.ts: Fixed JWT token generation\n- config/redis.ts: Updated connection pooling\n- tests/auth.test.ts: Added mock setup for new config\n\n## Decisions Made\n- Using Redis connection pool instead of per-request connections\n- Retry logic with exponential backoff for transient failures\n\n## Current State\n- 14 tests passing, 2 failing\n- Remaining: mock setup for session service tests\n\n## Next Steps\n1. 
Fix remaining test failures\n2. Run full test suite\n3. Update documentation\n\n\nThis structure prevents silent loss of file paths or decisions because each section must be explicitly addressed.\n\nCompression Trigger Strategies\n\nWhen to trigger compression matters as much as how to compress:\n\nStrategy\tTrigger Point\tTrade-off\nFixed threshold\t70-80% context utilization\tSimple but may compress too early\nSliding window\tKeep last N turns + summary\tPredictable context size\nImportance-based\tCompress low-relevance sections first\tComplex but preserves signal\nTask-boundary\tCompress at logical task completions\tClean summaries but unpredictable timing\n\nThe sliding window approach with structured summaries provides the best balance of predictability and quality for most coding agent use cases.\n\nProbe-Based Evaluation\n\nTraditional metrics like ROUGE or embedding similarity fail to capture functional compression quality. A summary may score high on lexical overlap while missing the one file path the agent needs.\n\nProbe-based evaluation directly measures functional quality by asking questions after compression:\n\nProbe Type\tWhat It Tests\tExample Question\nRecall\tFactual retention\t\"What was the original error message?\"\nArtifact\tFile tracking\t\"Which files have we modified?\"\nContinuation\tTask planning\t\"What should we do next?\"\nDecision\tReasoning chain\t\"What did we decide about the Redis issue?\"\n\nIf compression preserved the right information, the agent answers correctly. If not, it guesses or hallucinates.\n\nEvaluation Dimensions\n\nSix dimensions capture compression quality for coding agents:\n\nAccuracy: Are technical details correct? File paths, function names, error codes.\nContext Awareness: Does the response reflect current conversation state?\nArtifact Trail: Does the agent know which files were read or modified?\nCompleteness: Does the response address all parts of the question?\nContinuity: Can work continue without re-fetching information?\nInstruction Following: Does the response respect stated constraints?\n\nAccuracy shows the largest variation between compression methods (0.6 point gap). Artifact trail is universally weak (2.2-2.5 range).\n\nPractical Guidance\nThree-Phase Compression Workflow\n\nFor large codebases or agent systems exceeding context windows, apply compression through three phases:\n\nResearch Phase: Produce a research document from architecture diagrams, documentation, and key interfaces. Compress exploration into a structured analysis of components and dependencies. Output: single research document.\n\nPlanning Phase: Convert research into implementation specification with function signatures, type definitions, and data flow. A 5M token codebase compresses to approximately 2,000 words of specification.\n\nImplementation Phase: Execute against the specification. Context remains focused on the spec rather than raw codebase exploration.\n\nUsing Example Artifacts as Seeds\n\nWhen provided with a manual migration example or reference PR, use it as a template to understand the target pattern. The example reveals constraints that static analysis cannot surface: which invariants must hold, which services break on changes, and what a clean migration looks like.\n\nThis is particularly important when the agent cannot distinguish essential complexity (business requirements) from accidental complexity (legacy workarounds). 
The example artifact encodes that distinction.\n\nImplementing Anchored Iterative Summarization\nDefine explicit summary sections matching your agent's needs\nOn first compression trigger, summarize truncated history into sections\nOn subsequent compressions, summarize only new truncated content\nMerge new summary into existing sections rather than regenerating\nTrack which information came from which compression cycle for debugging\nWhen to Use Each Approach\n\nUse anchored iterative summarization when:\n\nSessions are long-running (100+ messages)\nFile tracking matters (coding, debugging)\nYou need to verify what was preserved\n\nUse opaque compression when:\n\nMaximum token savings required\nSessions are relatively short\nRe-fetching costs are low\n\nUse regenerative summaries when:\n\nSummary interpretability is critical\nSessions have clear phase boundaries\nFull context review is acceptable on each compression\nCompression Ratio Considerations\nMethod\tCompression Ratio\tQuality Score\tTrade-off\nAnchored Iterative\t98.6%\t3.70\tBest quality, slightly less compression\nRegenerative\t98.7%\t3.44\tGood quality, moderate compression\nOpaque\t99.3%\t3.35\tBest compression, quality loss\n\nThe 0.7% additional tokens retained by structured summarization buys 0.35 quality points. For any task where re-fetching costs matter, this trade-off favors structured approaches.\n\nExamples\n\nExample 1: Debugging Session Compression\n\nOriginal context (89,000 tokens, 178 messages):\n\n401 error on /api/auth/login endpoint\nTraced through auth controller, middleware, session store\nFound stale Redis connection\nFixed connection pooling, added retry logic\n14 tests passing, 2 failing\n\nStructured summary after compression:\n\n## Session Intent\nDebug 401 Unauthorized error on /api/auth/login despite valid credentials.\n\n## Root Cause\nStale Redis connection in session store. JWT generated correctly but session could not be persisted.\n\n## Files Modified\n- auth.controller.ts: No changes (read only)\n- middleware/cors.ts: No changes (examined)\n- config/redis.ts: Fixed connection pooling configuration\n- services/session.service.ts: Added retry logic for transient failures\n- tests/auth.test.ts: Updated mock setup\n\n## Test Status\n14 passing, 2 failing (mock setup issues)\n\n## Next Steps\n1. Fix remaining test failures (mock session service)\n2. Run full test suite\n3. Deploy to staging\n\n\nExample 2: Probe Response Quality\n\nAfter compression, asking \"What was the original error?\":\n\nGood response (structured summarization):\n\n\"The original error was a 401 Unauthorized response from the /api/auth/login endpoint. Users received this error with valid credentials. Root cause was stale Redis connection in session store.\"\n\nPoor response (aggressive compression):\n\n\"We were debugging an authentication issue. The login was failing. We fixed some configuration problems.\"\n\nThe structured response preserves endpoint, error code, and root cause. 
The aggressive response loses all technical detail.\n\nGuidelines\nOptimize for tokens-per-task, not tokens-per-request\nUse structured summaries with explicit sections for file tracking\nTrigger compression at 70-80% context utilization\nImplement incremental merging rather than full regeneration\nTest compression quality with probe-based evaluation\nTrack artifact trail separately if file tracking is critical\nAccept slightly lower compression ratios for better quality retention\nMonitor re-fetching frequency as a compression quality signal\nIntegration\n\nThis skill connects to several others in the collection:\n\ncontext-degradation - Compression is a mitigation strategy for degradation\ncontext-optimization - Compression is one optimization technique among many\nevaluation - Probe-based evaluation applies to compression testing\nmemory-systems - Compression relates to scratchpad and summary memory patterns\nReferences\n\nInternal reference:\n\nEvaluation Framework Reference - Detailed probe types and scoring rubrics\n\nRelated skills in this collection:\n\ncontext-degradation - Understanding what compression prevents\ncontext-optimization - Broader optimization strategies\nevaluation - Building evaluation frameworks\n\nExternal resources:\n\nFactory Research: Evaluating Context Compression for AI Agents (December 2025)\nResearch on LLM-as-judge evaluation methodology (Zheng et al., 2023)\nNetflix Engineering: \"The Infinite Software Crisis\" - Three-phase workflow and context compression at scale (AI Summit 2025)\nSkill Metadata\n\nCreated: 2025-12-22 Last Updated: 2025-12-26 Author: Agent Skills for Context Engineering Contributors Version: 1.1.0"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/leoyessi10-tech/context-engineering",
    "publisherUrl": "https://clawhub.ai/leoyessi10-tech/context-engineering",
    "owner": "leoyessi10-tech",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/context-engineering",
    "downloadUrl": "https://openagent3.xyz/downloads/context-engineering",
    "agentUrl": "https://openagent3.xyz/skills/context-engineering/agent",
    "manifestUrl": "https://openagent3.xyz/skills/context-engineering/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/context-engineering/agent.md"
  }
}