{
  "schemaVersion": "1.0",
  "item": {
    "slug": "surrealdb-knowledge-graph-memory",
    "name": "Self-improving Agent Memory Upgrade (SurrealDB)",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/maverick-software/surrealdb-knowledge-graph-memory",
    "canonicalUrl": "https://clawhub.ai/maverick-software/surrealdb-knowledge-graph-memory",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/surrealdb-knowledge-graph-memory",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=surrealdb-knowledge-graph-memory",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SECURITY.md",
      "SKILL.md",
      "CHANGELOG.md",
      "README.md",
      "skill.json",
      "INSTRUCTIONS.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-07T17:22:31.273Z",
      "expiresAt": "2026-05-14T17:22:31.273Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=surrealdb-knowledge-graph-memory",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=surrealdb-knowledge-graph-memory",
        "contentDisposition": "attachment; filename=\"surrealdb-knowledge-graph-memory-2.2.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/surrealdb-knowledge-graph-memory"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/surrealdb-knowledge-graph-memory",
    "agentPageUrl": "https://openagent3.xyz/skills/surrealdb-knowledge-graph-memory/agent",
    "manifestUrl": "https://openagent3.xyz/skills/surrealdb-knowledge-graph-memory/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/surrealdb-knowledge-graph-memory/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "SurrealDB Knowledge Graph Memory v2.2",
        "body": "A comprehensive knowledge graph memory system with semantic search, episodic memory, working memory, automatic context injection, and per-agent isolation — enabling every agent to become a continuously self-improving AI."
      },
      {
        "title": "Description",
        "body": "Use this skill for:\n\nSemantic Memory — Store and retrieve facts with confidence-weighted vector search\nEpisodic Memory — Record task histories and learn from past experiences\nWorking Memory — Track active task state with crash recovery\nAuto-Injection — Automatically inject relevant context into agent prompts\nOutcome Calibration — Facts gain/lose confidence based on task outcomes\nSelf-Improvement — Scheduled extraction and relation discovery make every agent smarter over time\n\nTriggers: \"remember this\", \"store fact\", \"what do you know about\", \"memory search\", \"find similar tasks\", \"learn from history\"\n\nSecurity: This skill reads workspace memory files and sends their content to OpenAI for extraction. It registers four background cron jobs and (optionally) patches OpenClaw source files. All behaviors are opt-in or documented. See SECURITY.md for the full breakdown before enabling.\nRequired: OPENAI_API_KEY, surreal binary, python3 ≥3.10"
      },
      {
        "title": "🔄 Self-Improving Agent Loop",
        "body": "This is the core concept: every agent equipped with this skill improves itself automatically, with no manual intervention required. Two scheduled cron jobs — knowledge extraction and relationship correlation — run on a fixed schedule and continuously grow the knowledge graph. Combined with auto-injection, the agent gets progressively smarter with each conversation."
      },
      {
        "title": "The Cycle",
        "body": "[Agent Conversation]\n       ↓  stores important facts via knowledge_store_sync\n[Memory Files]  ← agent writes to MEMORY.md / daily memory/*.md files\n       ↓  every 6 hours — extraction cron fires\n[Entity + Fact Extraction]  ← LLM reads files, extracts structured facts + entities\n       ↓  facts stored with embeddings + agent_id tag\n[Knowledge Graph]  ← SurrealDB: facts, entities, mentions\n       ↓  daily at 3 AM — relation discovery cron fires\n[Relationship Correlation]  ← AI finds semantic links between facts\n       ↓  relates_to edges created between connected facts\n[Richer Knowledge Graph]  ← facts are no longer isolated; they form a web\n       ↓  on every new message — auto-injection reads the graph\n[Context Window]  ← relevant facts + relations + episodes injected automatically\n       ↓\n[Better Responses]  ← agent uses accumulated knowledge to respond more accurately\n       ↑  new insights written back to memory files → cycle repeats"
      },
      {
        "title": "What Each Scheduled Job Does",
        "body": "Job 1 — Knowledge Extraction (every 6 hours)\n\nScript: scripts/extract-knowledge.py extract\n\nReads MEMORY.md and all memory/YYYY-MM-DD.md files in the workspace\nUses an LLM (GPT-4) to extract structured facts, entities, and key concepts\nHashes file content to skip unchanged files — only processes diffs\nStores each fact with:\n\nA vector embedding (OpenAI text-embedding-3-small) for semantic search\nA confidence score (defaults to 0.9)\nAn agent_id tag so facts stay isolated to the right agent\nsource metadata pointing back to the originating file\n\n\nResult: raw conversational knowledge becomes searchable, structured memory\n\nJob 2 — Relationship Correlation (daily at 3 AM)\n\nScript: scripts/extract-knowledge.py discover-relations\n\nQueries the graph for facts that have no relationships yet (\"isolated facts\")\nBatches them and asks an LLM to identify semantic connections between them\nCreates relates_to edges in SurrealDB linking related facts\nResult: isolated facts become a connected knowledge web — the agent can now traverse relationships, not just keyword-match\nOver time, the graph evolves from a flat list into a rich semantic network\n\nJob 3 — Deduplication (daily at 4 AM)\n\nScript: scripts/extract-knowledge.py dedupe --threshold 0.92\n\nCompares all facts using vector similarity (cosine distance)\nFacts above the threshold (92% similar) are flagged as duplicates\nKeeps the higher-confidence fact, removes the duplicate\nPrevents extraction from creating bloat over time\nResult: a clean, non-redundant knowledge base\n\nJob 4 — Reconciliation (weekly, Sundays at 5 AM)\n\nScript: scripts/extract-knowledge.py reconcile --verbose\n\nApplies time-based confidence decay to aging facts\nPrunes facts that have decayed below minimum confidence\nCleans orphaned entities with no linked facts\nConsolidates near-duplicate entities\nResult: the knowledge graph stays healthy, relevant, and pruned of stale information"
      },
      {
        "title": "Why This Makes Agents Self-Improving",
        "body": "When auto-injection is enabled, every new conversation starts with the most relevant slice of the accumulated knowledge graph. As the agent:\n\nHas conversations → writes insights to memory files\nExtraction job fires → converts those insights into structured facts\nRelation job fires → connects those facts to existing knowledge\nNext conversation → auto-injection pulls in richer, more connected context\n\n...the agent effectively gets smarter with every cycle. It learns from its own outputs, grounds future responses in its accumulated history, and avoids repeating mistakes (via episodic memory and outcome calibration)."
      },
      {
        "title": "OpenClaw Cron Jobs (Required)",
        "body": "The skill requires 4 cron jobs for full self-improving operation. All run as isolated background sessions with no delivery:\n\nJob NameScheduleWhat it runsMemory Knowledge ExtractionEvery 6 hours (0 */6 * * *)extract-knowledge.py extract — extracts facts from memory filesMemory Relation DiscoveryDaily at 3 AM (0 3 * * *)extract-knowledge.py discover-relations — AI-powered relationship findingMemory DeduplicationDaily at 4 AM (0 4 * * *)extract-knowledge.py dedupe --threshold 0.92 — removes duplicate/near-duplicate factsMemory ReconciliationWeekly Sun 5 AM (0 5 * * 0)extract-knowledge.py reconcile --verbose — prunes stale facts, applies confidence decay, cleans orphans\n\nAll jobs use sessionTarget: \"isolated\" with delivery: none. They run in fully isolated background sessions and never fire into the main agent session. A bottom-right corner toast notification appears in the Control UI when each job starts and completes.\n\nSetup commands (run after installation):\n\n# 1. Knowledge Extraction — every 6 hours\nopenclaw cron add \\\n  --name \"Memory Knowledge Extraction\" \\\n  --cron \"0 */6 * * *\" \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 300 \\\n  --message \"Run memory knowledge extraction. Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py extract\"\n\n# 2. Relation Discovery — daily at 3 AM\nopenclaw cron add \\\n  --name \"Memory Relation Discovery\" \\\n  --cron \"0 3 * * *\" --exact \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 300 \\\n  --message \"Run memory relation discovery. Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py discover-relations\"\n\n# 3. Deduplication — daily at 4 AM\nopenclaw cron add \\\n  --name \"Memory Deduplication\" \\\n  --cron \"0 4 * * *\" --exact \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 120 \\\n  --message \"Run knowledge graph deduplication. Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py dedupe --threshold 0.92\"\n\n# 4. Reconciliation — weekly on Sundays at 5 AM\nopenclaw cron add \\\n  --name \"Memory Reconciliation\" \\\n  --cron \"0 5 * * 0\" --exact \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 180 \\\n  --message \"Run knowledge graph reconciliation. Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py reconcile --verbose\"\n\nReplace SKILL_DIR with your actual skill path.\n\nTo check job status:\n\nopenclaw cron list"
      },
      {
        "title": "Adding Cron Jobs for a New Agent",
        "body": "When spawning a new agent that should self-improve, register its own extraction job:\n\n# OpenClaw cron add (via Koda) — example for a 'scout-monitor' agent\n# Schedule: every 6h, extract facts tagged to scout-monitor\npython3 scripts/extract-knowledge.py extract --agent-id scout-monitor\n\nThe --agent-id flag ensures extracted facts are isolated to that agent's pool and don't pollute the main agent's knowledge. Each agent self-improves independently while still reading shared scope='global' facts."
      },
      {
        "title": "Features (v2.2)",
        "body": "FeatureDescriptionSemantic FactsVector-indexed facts with confidence scoringEpisodic MemoryTask histories with decisions, problems, solutions, learningsWorking MemoryYAML-based task state that survives crashesOutcome CalibrationFacts used in successful tasks gain confidenceAuto-InjectionRelevant facts/episodes injected into prompts automaticallyEntity ExtractionAutomatic entity linking and relationship discoveryConfidence DecayStale facts naturally decay over timeAgent IsolationEach agent has its own scoped memory pool; scope='global' facts are shared across all agentsSelf-Improving LoopScheduled extraction + relation discovery automatically grow the graph"
      },
      {
        "title": "Agent Isolation (v2.2)",
        "body": "Each agent in OpenClaw has its own scoped memory pool. Facts are tagged with agent_id on write; all read queries filter to (agent_id = $agent_id OR scope = 'global')."
      },
      {
        "title": "How it works",
        "body": "Agent A (main)          Agent B (scout-monitor)\n   ┌──────────┐              ┌──────────┐\n   │ 391 facts│              │   0 facts│   ← isolated pools\n   └──────────┘              └──────────┘\n         ↑                         ↑\n         └──── scope='global' ─────┘   ← shared facts visible to both"
      },
      {
        "title": "Storing facts",
        "body": "All knowledge_store / knowledge_store_sync calls accept agent_id:\n\n# Stored to scout-monitor's pool only\nmcporter call surrealdb-memory.knowledge_store \\\n    content=\"API is healthy at /ping\" \\\n    agent_id='scout-monitor'\n\n# Stored globally (visible to all agents)\nmcporter call surrealdb-memory.knowledge_store \\\n    content=\"Project uses Python 3.12\" \\\n    agent_id='main' scope='global'"
      },
      {
        "title": "Auto-injection (agent-aware)",
        "body": "With references/enhanced-loop-hook-agent-isolation.md applied to src/agents/enhanced-loop-hook.ts, the enhanced loop automatically extracts the agent ID from the session key and passes it to memory_inject. No manual configuration needed — each agent's auto-injection is silently scoped to its own facts."
      },
      {
        "title": "Extraction (agent-aware)",
        "body": "Pass --agent-id to extract-knowledge.py so cron-extracted facts are correctly tagged:\n\npython3 scripts/extract-knowledge.py extract --agent-id scout-monitor\n\nDefault is \"main\". Update cron jobs accordingly for non-main agents."
      },
      {
        "title": "Backward compatibility",
        "body": "Existing facts without an explicit agent_id are treated as owned by \"main\". Nothing is lost on upgrade to v2.2."
      },
      {
        "title": "Dashboard UI",
        "body": "The Memory tab in the Control dashboard provides a two-column layout:"
      },
      {
        "title": "Left Column: Dashboard",
        "body": "📊 Statistics — Live counts of facts, entities, relations, and archived items\nConfidence Bar — Visual display of average confidence score\nSources Breakdown — Facts grouped by source file\n🏥 System Health — Status of SurrealDB, schema, and Python dependencies\n🔗 DB Studio — Quick link to SurrealDB's web interface"
      },
      {
        "title": "Right Column: Operations",
        "body": "📥 Knowledge Extraction\n\nExtract Changes — Incrementally extract facts from modified files\nFind Relations — Discover semantic relationships between existing facts\nFull Sync — Complete extraction + relation discovery\nProgress bar with real-time status updates\n\n\n\n🔧 Maintenance\n\nApply Decay — Reduce confidence of stale facts\nPrune Stale — Archive facts below threshold\nFull Sweep — Complete maintenance cycle\n\n\n\n💡 Tips — Quick reference for operations\n\nWhen the system needs setup, an Installation section appears with manual controls."
      },
      {
        "title": "Prerequisites",
        "body": "SurrealDB installed and running:\n# Install (one-time)\n./scripts/install.sh\n\n# Start server\nsurreal start --bind 127.0.0.1:8000 --user root --pass root file:~/.openclaw/memory/knowledge.db\n\n\n\nPython dependencies (use the skill's venv):\ncd /path/to/surrealdb-memory\npython3 -m venv .venv\nsource .venv/bin/activate\npip install surrealdb openai pyyaml\n\n\n\nOpenAI API key for embeddings (set in OpenClaw config or environment)\n\n\nmcporter configured with this skill's MCP server"
      },
      {
        "title": "MCP Server Setup",
        "body": "Add to your config/mcporter.json:\n\n{\n  \"servers\": {\n    \"surrealdb-memory\": {\n      \"command\": [\"python3\", \"/path/to/surrealdb-memory/scripts/mcp-server-v2.py\"],\n      \"env\": {\n        \"OPENAI_API_KEY\": \"${OPENAI_API_KEY}\",\n        \"SURREAL_URL\": \"http://localhost:8000\",\n        \"SURREAL_USER\": \"root\",\n        \"SURREAL_PASS\": \"root\"\n      }\n    }\n  }\n}"
      },
      {
        "title": "Core Tools",
        "body": "ToolDescriptionknowledge_searchSemantic search for factsknowledge_recallGet a fact with full context (relations, entities)knowledge_storeStore a new factknowledge_statsGet database statistics"
      },
      {
        "title": "v2 Tools",
        "body": "ToolDescriptionknowledge_store_syncStore with importance routing (high importance = immediate write)episode_searchFind similar past tasksepisode_learningsGet actionable learnings from historyepisode_storeRecord a completed task episodeworking_memory_statusGet current task statecontext_aware_searchSearch with task context boostingmemory_injectIntelligent context injection for prompts"
      },
      {
        "title": "memory_inject Tool",
        "body": "The memory_inject tool returns formatted context ready for prompt injection:\n\n# Scoped to a specific agent (returns only that agent's facts + global facts)\nmcporter call surrealdb-memory.memory_inject \\\n    query=\"user message\" \\\n    max_facts:7 \\\n    max_episodes:3 \\\n    confidence_threshold:0.9 \\\n    include_relations:true \\\n    agent_id='scout-monitor'\n\nOutput:\n\n## Semantic Memory (Relevant Facts)\n📌 [60% relevant, 100% confidence] Relevant fact here...\n\n## Related Entities\n• Entity Name (type)\n\n## Episodic Memory (Past Experiences)\n✅ Task: Previous task goal [similarity]\n   → Key learning from that task"
      },
      {
        "title": "Auto-Injection (Enhanced Loop Integration)",
        "body": "When enabled, memory is automatically injected into every agent turn:\n\nEnable in Mode UI:\n\nOpen Control dashboard → Mode tab\nScroll to \"🧠 Memory & Knowledge Graph\" section\nToggle \"Auto-Inject Context\"\nConfigure limits (max facts, max episodes, confidence threshold)\n\n\n\nHow it works:\n\nOn each user message, memory_inject is called automatically\nRelevant facts are searched based on the user's query\nIf average fact confidence < threshold, episodic memories are included\nFormatted context is injected into the agent's system prompt\nv2.2: With references/enhanced-loop-hook-agent-isolation.md applied, the active agent's ID is automatically extracted from the session key and passed as agent_id — each agent's injection is silently scoped to its own facts\n\n\n\nConfiguration (in Mode settings):\nSettingDefaultDescriptionAuto-Inject ContextOffMaster toggleMax Facts7Maximum semantic facts to injectMax Episodes3Maximum episodic memoriesConfidence Threshold90%Include episodes when below thisInclude RelationsOnInclude entity relationships"
      },
      {
        "title": "CLI Commands",
        "body": "# Activate venv\nsource .venv/bin/activate\n\n# Store a fact\npython scripts/memory-cli.py store \"Important fact\" --confidence 0.9\n\n# Search\npython scripts/memory-cli.py search \"query\"\n\n# Get stats\npython scripts/knowledge-tool.py stats\n\n# Run maintenance\npython scripts/memory-cli.py maintain\n\n# Extract from files (incremental)\npython scripts/extract-knowledge.py extract\n\n# Extract for a specific agent\npython scripts/extract-knowledge.py extract --agent-id scout-monitor\n\n# Force full extraction (all files, not just changed)\npython scripts/extract-knowledge.py extract --full\n\n# Discover semantic relationships\npython scripts/extract-knowledge.py discover-relations"
      },
      {
        "title": "Tables",
        "body": "fact — Semantic facts with embeddings and confidence\nentity — Extracted entities (people, places, concepts)\nrelates_to — Relationships between facts\nmentions — Fact-to-entity links\nepisode — Task histories with outcomes\nworking_memory — Active task snapshots"
      },
      {
        "title": "Key Fields (fact)",
        "body": "content — The fact text\nembedding — Vector for semantic search\nconfidence — Base confidence (0-1)\nsuccess_count / failure_count — Outcome tracking\nscope — global, client, or agent\nagent_id — Which agent owns this fact (v2.2)"
      },
      {
        "title": "Key Fields (episode)",
        "body": "goal — What was attempted\noutcome — success, failure, abandoned\ndecisions — Key decisions made\nproblems — Problems encountered (structured)\nsolutions — Solutions applied (structured)\nkey_learnings — Extracted lessons"
      },
      {
        "title": "Confidence Scoring",
        "body": "Effective confidence is calculated from:\n\nBase confidence (0.0–1.0)\n+ Inherited boost from supporting facts\n+ Entity boost from well-established entities\n+ Outcome adjustment based on success/failure history\n- Contradiction drain from conflicting facts\n- Time decay (configurable, ~5% per month)"
      },
      {
        "title": "Automated — OpenClaw Cron (as deployed)",
        "body": "The self-improving loop runs via 4 registered OpenClaw cron jobs:\n\nEvery 6h     → extract-knowledge.py extract            (extract facts from memory files)\nDaily 3 AM   → extract-knowledge.py discover-relations  (find relationships between facts)\nDaily 4 AM   → extract-knowledge.py dedupe              (remove duplicate facts)\nWeekly Sun   → extract-knowledge.py reconcile            (prune stale, decay, clean orphans)\n\nSee the \"OpenClaw Cron Jobs (Required)\" section above for setup commands.\n\nTo verify they're active:\n\nopenclaw cron list\n\nTo manually trigger any job:\n\ncd SKILL_DIR && source .venv/bin/activate\npython3 scripts/extract-knowledge.py extract\npython3 scripts/extract-knowledge.py discover-relations\npython3 scripts/extract-knowledge.py dedupe --threshold 0.92\npython3 scripts/extract-knowledge.py reconcile --verbose"
      },
      {
        "title": "Manual (UI)",
        "body": "Use the Maintenance section in the Memory tab:\n\nApply Decay — Reduce confidence of stale facts\nPrune Stale — Archive facts below 0.3 confidence\nFull Sweep — Run complete maintenance cycle"
      },
      {
        "title": "Scripts",
        "body": "FilePurposemcp-server-v2.pyMCP server with all 11 toolsmcp-server.pyLegacy v1 MCP serverepisodes.pyEpisodic memory moduleworking_memory.pyWorking memory modulememory-cli.pyCLI for manual operationsextract-knowledge.pyBulk extraction from files (supports --agent-id)knowledge-tools.pyHigher-level extractionschema-v2.sqlv2 database schemamigrate-v2.pyMigration script"
      },
      {
        "title": "Integration",
        "body": "FilePurposeopenclaw-integration/gateway/memory.tsGateway server methodsopenclaw-integration/ui/memory-view.tsMemory dashboard UIopenclaw-integration/ui/memory-controller.tsUI controller"
      },
      {
        "title": "Troubleshooting",
        "body": "\"Connection refused\"\n→ Start SurrealDB: surreal start --bind 127.0.0.1:8000 --user root --pass root file:~/.openclaw/memory/knowledge.db\n\n\"No MCP servers configured\"\n→ Ensure mcporter is run from a directory containing config/mcporter.json with the surrealdb-memory server defined\n\nMemory injection returning null\n→ Check that OPENAI_API_KEY is set in the environment\n→ Verify SurrealDB is running and schema is initialized\n\nEmpty search results\n→ Run extraction from the UI or via CLI: python3 scripts/extract-knowledge.py extract\n\n\"No facts to analyze\" on relation discovery\n→ This is normal if all facts are already related — the graph is well-connected. Run extraction first if the graph is empty.\n\nProgress bar not updating\n→ Ensure the gateway has been restarted after UI updates\n→ Check browser console for polling errors\n\nFacts from wrong agent appearing\n→ Check that agent_id is being passed correctly to all store/search calls\n→ Verify references/enhanced-loop-hook-agent-isolation.md is applied for auto-injection scoping"
      },
      {
        "title": "Migration from v1 / v2.1",
        "body": "# Apply v2 schema (additive, won't delete existing data)\n./scripts/migrate-v2.sh\n\n# Or manually:\nsource .venv/bin/activate\npython scripts/migrate-v2.py\n\nAll existing facts without an agent_id are treated as owned by \"main\" — backward compatible."
      },
      {
        "title": "Stats",
        "body": "Check your knowledge graph via UI (Dashboard section) or CLI:\n\nmcporter call surrealdb-memory.knowledge_stats\n\nExample output:\n\n{\n  \"facts\": 379,\n  \"entities\": 485,\n  \"relations\": 106,\n  \"episodes\": 3,\n  \"avg_confidence\": 0.99\n}\n\nv2.2 — Agent isolation, self-improving loop, cron-based extraction & relationship correlation"
      }
    ],
    "body": "SurrealDB Knowledge Graph Memory v2.2\n\nA comprehensive knowledge graph memory system with semantic search, episodic memory, working memory, automatic context injection, and per-agent isolation — enabling every agent to become a continuously self-improving AI.\n\nDescription\n\nUse this skill for:\n\nSemantic Memory — Store and retrieve facts with confidence-weighted vector search\nEpisodic Memory — Record task histories and learn from past experiences\nWorking Memory — Track active task state with crash recovery\nAuto-Injection — Automatically inject relevant context into agent prompts\nOutcome Calibration — Facts gain/lose confidence based on task outcomes\nSelf-Improvement — Scheduled extraction and relation discovery make every agent smarter over time\n\nTriggers: \"remember this\", \"store fact\", \"what do you know about\", \"memory search\", \"find similar tasks\", \"learn from history\"\n\nSecurity: This skill reads workspace memory files and sends their content to OpenAI for extraction. It registers two background cron jobs and (optionally) patches OpenClaw source files. All behaviors are opt-in or documented. See SECURITY.md for the full breakdown before enabling.\n\nRequired: OPENAI_API_KEY, surreal binary, python3 ≥3.10\n\n🔄 Self-Improving Agent Loop\n\nThis is the core concept: every agent equipped with this skill improves itself automatically, with no manual intervention required. Two scheduled cron jobs — knowledge extraction and relationship correlation — run on a fixed schedule and continuously grow the knowledge graph. 
Combined with auto-injection, the agent gets progressively smarter with each conversation.\n\nThe Cycle\n[Agent Conversation]\n       ↓  stores important facts via knowledge_store_sync\n[Memory Files]  ← agent writes to MEMORY.md / daily memory/*.md files\n       ↓  every 6 hours — extraction cron fires\n[Entity + Fact Extraction]  ← LLM reads files, extracts structured facts + entities\n       ↓  facts stored with embeddings + agent_id tag\n[Knowledge Graph]  ← SurrealDB: facts, entities, mentions\n       ↓  daily at 3 AM — relation discovery cron fires\n[Relationship Correlation]  ← AI finds semantic links between facts\n       ↓  relates_to edges created between connected facts\n[Richer Knowledge Graph]  ← facts are no longer isolated; they form a web\n       ↓  on every new message — auto-injection reads the graph\n[Context Window]  ← relevant facts + relations + episodes injected automatically\n       ↓\n[Better Responses]  ← agent uses accumulated knowledge to respond more accurately\n       ↑  new insights written back to memory files → cycle repeats\n\nWhat Each Scheduled Job Does\nJob 1 — Knowledge Extraction (every 6 hours)\n\nScript: scripts/extract-knowledge.py extract\n\nReads MEMORY.md and all memory/YYYY-MM-DD.md files in the workspace\nUses an LLM (GPT-4) to extract structured facts, entities, and key concepts\nHashes file content to skip unchanged files — only processes diffs\nStores each fact with:\nA vector embedding (OpenAI text-embedding-3-small) for semantic search\nA confidence score (defaults to 0.9)\nAn agent_id tag so facts stay isolated to the right agent\nsource metadata pointing back to the originating file\nResult: raw conversational knowledge becomes searchable, structured memory\nJob 2 — Relationship Correlation (daily at 3 AM)\n\nScript: scripts/extract-knowledge.py discover-relations\n\nQueries the graph for facts that have no relationships yet (\"isolated facts\")\nBatches them and asks an LLM to identify semantic connections 
between them\nCreates relates_to edges in SurrealDB linking related facts\nResult: isolated facts become a connected knowledge web — the agent can now traverse relationships, not just keyword-match\nOver time, the graph evolves from a flat list into a rich semantic network\nJob 3 — Deduplication (daily at 4 AM)\n\nScript: scripts/extract-knowledge.py dedupe --threshold 0.92\n\nCompares all facts using vector similarity (cosine distance)\nFacts above the threshold (92% similar) are flagged as duplicates\nKeeps the higher-confidence fact, removes the duplicate\nPrevents extraction from creating bloat over time\nResult: a clean, non-redundant knowledge base\nJob 4 — Reconciliation (weekly, Sundays at 5 AM)\n\nScript: scripts/extract-knowledge.py reconcile --verbose\n\nApplies time-based confidence decay to aging facts\nPrunes facts that have decayed below minimum confidence\nCleans orphaned entities with no linked facts\nConsolidates near-duplicate entities\nResult: the knowledge graph stays healthy, relevant, and pruned of stale information\nWhy This Makes Agents Self-Improving\n\nWhen auto-injection is enabled, every new conversation starts with the most relevant slice of the accumulated knowledge graph. As the agent:\n\nHas conversations → writes insights to memory files\nExtraction job fires → converts those insights into structured facts\nRelation job fires → connects those facts to existing knowledge\nNext conversation → auto-injection pulls in richer, more connected context\n\n...the agent effectively gets smarter with every cycle. It learns from its own outputs, grounds future responses in its accumulated history, and avoids repeating mistakes (via episodic memory and outcome calibration).\n\nOpenClaw Cron Jobs (Required)\n\nThe skill requires 5 cron jobs for full self-improving operation. 
All run as isolated background sessions with no delivery:\n\nJob Name\tSchedule\tWhat it runs\nMemory Knowledge Extraction\tEvery 6 hours (0 */6 * * *)\textract-knowledge.py extract — extracts facts from memory files\nMemory Relation Discovery\tDaily at 3 AM (0 3 * * *)\textract-knowledge.py discover-relations — AI-powered relationship finding\nMemory Deduplication\tDaily at 4 AM (0 4 * * *)\textract-knowledge.py dedupe --threshold 0.92 — removes duplicate/near-duplicate facts\nMemory Reconciliation\tWeekly Sun 5 AM (0 5 * * 0)\textract-knowledge.py reconcile --verbose — prunes stale facts, applies confidence decay, cleans orphans\n\nAll jobs use sessionTarget: \"isolated\" with delivery: none. They run in fully isolated background sessions and never fire into the main agent session. A bottom-right corner toast notification appears in the Control UI when each job starts and completes.\n\nSetup commands (run after installation):\n\n# 1. Knowledge Extraction — every 6 hours\nopenclaw cron add \\\n  --name \"Memory Knowledge Extraction\" \\\n  --cron \"0 */6 * * *\" \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 300 \\\n  --message \"Run memory knowledge extraction. Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py extract\"\n\n# 2. Relation Discovery — daily at 3 AM\nopenclaw cron add \\\n  --name \"Memory Relation Discovery\" \\\n  --cron \"0 3 * * *\" --exact \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 300 \\\n  --message \"Run memory relation discovery. Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py discover-relations\"\n\n# 3. Deduplication — daily at 4 AM\nopenclaw cron add \\\n  --name \"Memory Deduplication\" \\\n  --cron \"0 4 * * *\" --exact \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 120 \\\n  --message \"Run knowledge graph deduplication. 
Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py dedupe --threshold 0.92\"\n\n# 4. Reconciliation — weekly on Sundays at 5 AM\nopenclaw cron add \\\n  --name \"Memory Reconciliation\" \\\n  --cron \"0 5 * * 0\" --exact \\\n  --agent main --session isolated --no-deliver \\\n  --timeout-seconds 180 \\\n  --message \"Run knowledge graph reconciliation. Execute: cd SKILL_DIR && source .venv/bin/activate && python3 scripts/extract-knowledge.py reconcile --verbose\"\n\n\nReplace SKILL_DIR with your actual skill path.\n\nTo check job status:\n\nopenclaw cron list\n\nAdding Cron Jobs for a New Agent\n\nWhen spawning a new agent that should self-improve, register its own extraction job:\n\n# OpenClaw cron add (via Koda) — example for a 'scout-monitor' agent\n# Schedule: every 6h, extract facts tagged to scout-monitor\npython3 scripts/extract-knowledge.py extract --agent-id scout-monitor\n\n\nThe --agent-id flag ensures extracted facts are isolated to that agent's pool and don't pollute the main agent's knowledge. Each agent self-improves independently while still reading shared scope='global' facts.\n\nFeatures (v2.2)\nFeature\tDescription\nSemantic Facts\tVector-indexed facts with confidence scoring\nEpisodic Memory\tTask histories with decisions, problems, solutions, learnings\nWorking Memory\tYAML-based task state that survives crashes\nOutcome Calibration\tFacts used in successful tasks gain confidence\nAuto-Injection\tRelevant facts/episodes injected into prompts automatically\nEntity Extraction\tAutomatic entity linking and relationship discovery\nConfidence Decay\tStale facts naturally decay over time\nAgent Isolation\tEach agent has its own scoped memory pool; scope='global' facts are shared across all agents\nSelf-Improving Loop\tScheduled extraction + relation discovery automatically grow the graph\nAgent Isolation (v2.2)\n\nEach agent in OpenClaw has its own scoped memory pool. 
Facts are tagged with agent_id on write; all read queries filter to (agent_id = $agent_id OR scope = 'global').\n\nHow it works\nAgent A (main)          Agent B (scout-monitor)\n   ┌──────────┐              ┌──────────┐\n   │ 391 facts│              │   0 facts│   ← isolated pools\n   └──────────┘              └──────────┘\n         ↑                         ↑\n         └──── scope='global' ─────┘   ← shared facts visible to both\n\nStoring facts\n\nAll knowledge_store / knowledge_store_sync calls accept agent_id:\n\n# Stored to scout-monitor's pool only\nmcporter call surrealdb-memory.knowledge_store \\\n    content=\"API is healthy at /ping\" \\\n    agent_id='scout-monitor'\n\n# Stored globally (visible to all agents)\nmcporter call surrealdb-memory.knowledge_store \\\n    content=\"Project uses Python 3.12\" \\\n    agent_id='main' scope='global'\n\nAuto-injection (agent-aware)\n\nWith references/enhanced-loop-hook-agent-isolation.md applied to src/agents/enhanced-loop-hook.ts, the enhanced loop automatically extracts the agent ID from the session key and passes it to memory_inject. No manual configuration needed — each agent's auto-injection is silently scoped to its own facts.\n\nExtraction (agent-aware)\n\nPass --agent-id to extract-knowledge.py so cron-extracted facts are correctly tagged:\n\npython3 scripts/extract-knowledge.py extract --agent-id scout-monitor\n\n\nDefault is \"main\". Update cron jobs accordingly for non-main agents.\n\nBackward compatibility\n\nExisting facts without an explicit agent_id are treated as owned by \"main\". 
Nothing is lost on upgrade to v2.2.\n\nDashboard UI\n\nThe Memory tab in the Control dashboard provides a two-column layout:\n\nLeft Column: Dashboard\n📊 Statistics — Live counts of facts, entities, relations, and archived items\nConfidence Bar — Visual display of average confidence score\nSources Breakdown — Facts grouped by source file\n🏥 System Health — Status of SurrealDB, schema, and Python dependencies\n🔗 DB Studio — Quick link to SurrealDB's web interface\nRight Column: Operations\n\n📥 Knowledge Extraction\n\nExtract Changes — Incrementally extract facts from modified files\nFind Relations — Discover semantic relationships between existing facts\nFull Sync — Complete extraction + relation discovery\nProgress bar with real-time status updates\n\n🔧 Maintenance\n\nApply Decay — Reduce confidence of stale facts\nPrune Stale — Archive facts below threshold\nFull Sweep — Complete maintenance cycle\n\n💡 Tips — Quick reference for operations\n\nWhen the system needs setup, an Installation section appears with manual controls.\n\nPrerequisites\n\nSurrealDB installed and running:\n\n# Install (one-time)\n./scripts/install.sh\n\n# Start server\nsurreal start --bind 127.0.0.1:8000 --user root --pass root file:~/.openclaw/memory/knowledge.db\n\n\nPython dependencies (use the skill's venv):\n\ncd /path/to/surrealdb-memory\npython3 -m venv .venv\nsource .venv/bin/activate\npip install surrealdb openai pyyaml\n\n\nOpenAI API key for embeddings (set in OpenClaw config or environment)\n\nmcporter configured with this skill's MCP server\n\nMCP Server Setup\n\nAdd to your config/mcporter.json:\n\n{\n  \"servers\": {\n    \"surrealdb-memory\": {\n      \"command\": [\"python3\", \"/path/to/surrealdb-memory/scripts/mcp-server-v2.py\"],\n      \"env\": {\n        \"OPENAI_API_KEY\": \"${OPENAI_API_KEY}\",\n        \"SURREAL_URL\": \"http://localhost:8000\",\n        \"SURREAL_USER\": \"root\",\n        \"SURREAL_PASS\": \"root\"\n      }\n    }\n  }\n}\n\nMCP Tools (11 
total)\nCore Tools\nTool\tDescription\nknowledge_search\tSemantic search for facts\nknowledge_recall\tGet a fact with full context (relations, entities)\nknowledge_store\tStore a new fact\nknowledge_stats\tGet database statistics\nv2 Tools\nTool\tDescription\nknowledge_store_sync\tStore with importance routing (high importance = immediate write)\nepisode_search\tFind similar past tasks\nepisode_learnings\tGet actionable learnings from history\nepisode_store\tRecord a completed task episode\nworking_memory_status\tGet current task state\ncontext_aware_search\tSearch with task context boosting\nmemory_inject\tIntelligent context injection for prompts\nmemory_inject Tool\n\nThe memory_inject tool returns formatted context ready for prompt injection:\n\n# Scoped to a specific agent (returns only that agent's facts + global facts)\nmcporter call surrealdb-memory.memory_inject \\\n    query=\"user message\" \\\n    max_facts:7 \\\n    max_episodes:3 \\\n    confidence_threshold:0.9 \\\n    include_relations:true \\\n    agent_id='scout-monitor'\n\n\nOutput:\n\n## Semantic Memory (Relevant Facts)\n📌 [60% relevant, 100% confidence] Relevant fact here...\n\n## Related Entities\n• Entity Name (type)\n\n## Episodic Memory (Past Experiences)\n✅ Task: Previous task goal [similarity]\n   → Key learning from that task\n\nAuto-Injection (Enhanced Loop Integration)\n\nWhen enabled, memory is automatically injected into every agent turn:\n\nEnable in Mode UI:\n\nOpen Control dashboard → Mode tab\nScroll to \"🧠 Memory & Knowledge Graph\" section\nToggle \"Auto-Inject Context\"\nConfigure limits (max facts, max episodes, confidence threshold)\n\nHow it works:\n\nOn each user message, memory_inject is called automatically\nRelevant facts are searched based on the user's query\nIf average fact confidence < threshold, episodic memories are included\nFormatted context is injected into the agent's system prompt\nv2.2: With references/enhanced-loop-hook-agent-isolation.md applied, the 
active agent's ID is automatically extracted from the session key and passed as agent_id — each agent's injection is silently scoped to its own facts\n\nConfiguration (in Mode settings):\n\nSetting\tDefault\tDescription\nAuto-Inject Context\tOff\tMaster toggle\nMax Facts\t7\tMaximum semantic facts to inject\nMax Episodes\t3\tMaximum episodic memories\nConfidence Threshold\t90%\tInclude episodes when below this\nInclude Relations\tOn\tInclude entity relationships\nCLI Commands\n# Activate venv\nsource .venv/bin/activate\n\n# Store a fact\npython scripts/memory-cli.py store \"Important fact\" --confidence 0.9\n\n# Search\npython scripts/memory-cli.py search \"query\"\n\n# Get stats\npython scripts/knowledge-tool.py stats\n\n# Run maintenance\npython scripts/memory-cli.py maintain\n\n# Extract from files (incremental)\npython scripts/extract-knowledge.py extract\n\n# Extract for a specific agent\npython scripts/extract-knowledge.py extract --agent-id scout-monitor\n\n# Force full extraction (all files, not just changed)\npython scripts/extract-knowledge.py extract --full\n\n# Discover semantic relationships\npython scripts/extract-knowledge.py discover-relations\n\nDatabase Schema (v2)\nTables\nfact — Semantic facts with embeddings and confidence\nentity — Extracted entities (people, places, concepts)\nrelates_to — Relationships between facts\nmentions — Fact-to-entity links\nepisode — Task histories with outcomes\nworking_memory — Active task snapshots\nKey Fields (fact)\ncontent — The fact text\nembedding — Vector for semantic search\nconfidence — Base confidence (0-1)\nsuccess_count / failure_count — Outcome tracking\nscope — global, client, or agent\nagent_id — Which agent owns this fact (v2.2)\nKey Fields (episode)\ngoal — What was attempted\noutcome — success, failure, abandoned\ndecisions — Key decisions made\nproblems — Problems encountered (structured)\nsolutions — Solutions applied (structured)\nkey_learnings — Extracted lessons\nConfidence 
Scoring\n\nEffective confidence is calculated from:\n\nBase confidence (0.0–1.0)\n+ Inherited boost from supporting facts\n+ Entity boost from well-established entities\n+ Outcome adjustment based on success/failure history\n- Contradiction drain from conflicting facts\n- Time decay (configurable, ~5% per month)\nMaintenance\nAutomated — OpenClaw Cron (as deployed)\n\nThe self-improving loop runs via 4 registered OpenClaw cron jobs:\n\nEvery 6h     → extract-knowledge.py extract            (extract facts from memory files)\nDaily 3 AM   → extract-knowledge.py discover-relations  (find relationships between facts)\nDaily 4 AM   → extract-knowledge.py dedupe              (remove duplicate facts)\nWeekly Sun   → extract-knowledge.py reconcile            (prune stale, decay, clean orphans)\n\n\nSee the \"OpenClaw Cron Jobs (Required)\" section above for setup commands.\n\nTo verify they're active:\n\nopenclaw cron list\n\n\nTo manually trigger any job:\n\ncd SKILL_DIR && source .venv/bin/activate\npython3 scripts/extract-knowledge.py extract\npython3 scripts/extract-knowledge.py discover-relations\npython3 scripts/extract-knowledge.py dedupe --threshold 0.92\npython3 scripts/extract-knowledge.py reconcile --verbose\n\nManual (UI)\n\nUse the Maintenance section in the Memory tab:\n\nApply Decay — Reduce confidence of stale facts\nPrune Stale — Archive facts below 0.3 confidence\nFull Sweep — Run complete maintenance cycle\nFiles\nScripts\nFile\tPurpose\nmcp-server-v2.py\tMCP server with all 11 tools\nmcp-server.py\tLegacy v1 MCP server\nepisodes.py\tEpisodic memory module\nworking_memory.py\tWorking memory module\nmemory-cli.py\tCLI for manual operations\nextract-knowledge.py\tBulk extraction from files (supports --agent-id)\nknowledge-tools.py\tHigher-level extraction\nschema-v2.sql\tv2 database schema\nmigrate-v2.py\tMigration script\nIntegration\nFile\tPurpose\nopenclaw-integration/gateway/memory.ts\tGateway server 
methods\nopenclaw-integration/ui/memory-view.ts\tMemory dashboard UI\nopenclaw-integration/ui/memory-controller.ts\tUI controller\nTroubleshooting\n\n\"Connection refused\" → Start SurrealDB: surreal start --bind 127.0.0.1:8000 --user root --pass root file:~/.openclaw/memory/knowledge.db\n\n\"No MCP servers configured\" → Ensure mcporter is run from a directory containing config/mcporter.json with the surrealdb-memory server defined\n\nMemory injection returning null → Check that OPENAI_API_KEY is set in the environment → Verify SurrealDB is running and schema is initialized\n\nEmpty search results → Run extraction from the UI or via CLI: python3 scripts/extract-knowledge.py extract\n\n\"No facts to analyze\" on relation discovery → This is normal if all facts are already related — the graph is well-connected. Run extraction first if the graph is empty.\n\nProgress bar not updating → Ensure the gateway has been restarted after UI updates → Check browser console for polling errors\n\nFacts from wrong agent appearing → Check that agent_id is being passed correctly to all store/search calls → Verify references/enhanced-loop-hook-agent-isolation.md is applied for auto-injection scoping\n\nMigration from v1 / v2.1\n# Apply v2 schema (additive, won't delete existing data)\n./scripts/migrate-v2.sh\n\n# Or manually:\nsource .venv/bin/activate\npython scripts/migrate-v2.py\n\n\nAll existing facts without an agent_id are treated as owned by \"main\" — backward compatible.\n\nStats\n\nCheck your knowledge graph via UI (Dashboard section) or CLI:\n\nmcporter call surrealdb-memory.knowledge_stats\n\n\nExample output:\n\n{\n  \"facts\": 379,\n  \"entities\": 485,\n  \"relations\": 106,\n  \"episodes\": 3,\n  \"avg_confidence\": 0.99\n}\n\n\nv2.2 — Agent isolation, self-improving loop, cron-based extraction & relationship correlation"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/maverick-software/surrealdb-knowledge-graph-memory",
    "publisherUrl": "https://clawhub.ai/maverick-software/surrealdb-knowledge-graph-memory",
    "owner": "maverick-software",
    "version": "2.2.3",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/surrealdb-knowledge-graph-memory",
    "downloadUrl": "https://openagent3.xyz/downloads/surrealdb-knowledge-graph-memory",
    "agentUrl": "https://openagent3.xyz/skills/surrealdb-knowledge-graph-memory/agent",
    "manifestUrl": "https://openagent3.xyz/skills/surrealdb-knowledge-graph-memory/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/surrealdb-knowledge-graph-memory/agent.md"
  }
}