{
  "schemaVersion": "1.0",
  "item": {
    "slug": "memory-pipeline",
    "name": "Memory Pipeline",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/joe-rlo/memory-pipeline",
    "canonicalUrl": "https://clawhub.ai/joe-rlo/memory-pipeline",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/memory-pipeline",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=memory-pipeline",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "openclaw.plugin.json",
      "references/setup.md",
      "scripts/ingest-chatgpt.py",
      "scripts/memory-briefing.py",
      "scripts/memory-extract.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/memory-pipeline"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/memory-pipeline",
    "agentPageUrl": "https://openagent3.xyz/skills/memory-pipeline/agent",
    "manifestUrl": "https://openagent3.xyz/skills/memory-pipeline/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/memory-pipeline/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Memory Pipeline",
        "body": "Give your AI agent a memory that actually works.\n\nAI agents wake up blank every session. Memory Pipeline fixes that — it extracts what matters from past conversations, connects the dots, and generates a daily briefing so your agent starts each session primed instead of clueless."
      },
      {
        "title": "What It Does",
        "body": "ComponentWhen it runsWhat it doesExtractBetween sessionsPulls structured facts (decisions, preferences, learnings) from daily notes and transcriptsLinkBetween sessionsBuilds a knowledge graph — connects related facts, flags contradictionsBriefBetween sessionsGenerates a compact BRIEFING.md loaded at session startIngestOn demandImports external knowledge (ChatGPT exports, etc.) into searchable memoryPerformance HooksDuring sessionsPre-game briefing injection, tool discipline, output compression, after-action review"
      },
      {
        "title": "Why This Is Different",
        "body": "Most \"memory\" solutions are just vector search over chat logs. This is a cognitive architecture — inspired by how human memory actually works:\n\nExtraction over accumulation — Instead of dumping everything into a database, it identifies what's worth remembering: decisions, preferences, learnings, commitments. The rest is noise.\nKnowledge graph, not just embeddings — Facts get linked to each other with bidirectional relationships. Your agent doesn't just find similar text — it understands that a decision about your tech stack relates to a project deadline relates to a preference you stated three weeks ago.\nBriefing over retrieval — Rather than hoping the right context gets retrieved at query time, your agent starts every session with a curated cheat sheet. Active projects, recent decisions, personality reminders. Zero cold-start lag.\nNo mid-swing coaching — Borrowed from performance psychology. Corrections happen between sessions, not during. The after-action review feeds into the next briefing. The loop is closed — just not mid-execution."
      },
      {
        "title": "Install",
        "body": "clawdhub install memory-pipeline"
      },
      {
        "title": "Setup",
        "body": "bash skills/memory-pipeline/scripts/setup.sh\n\nThe setup script will detect your workspace, check dependencies (Python 3 + any LLM API key), create the memory/ directory, and run the full pipeline."
      },
      {
        "title": "Requirements",
        "body": "Python 3\nAt least one LLM API key (auto-detected):\n\nOpenAI (OPENAI_API_KEY or ~/.config/openai/api_key)\nAnthropic (ANTHROPIC_API_KEY or ~/.config/anthropic/api_key)\nGemini (GEMINI_API_KEY or ~/.config/gemini/api_key)"
      },
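      {
        "title": "Example: API Key Lookup (Sketch)",
        "body": "The scripts auto-detect keys from either an environment variable or a per-provider config file. A minimal sketch of that lookup order, assuming the documented locations above; find_api_key is a hypothetical helper, not a function shipped with the package.\n\n# Illustrative key lookup only -- not code shipped with the skill.\nimport os\nfrom pathlib import Path\n\ndef find_api_key(env_var, config_path):\n    \"\"\"Return the key from the environment first, then from the documented config file.\"\"\"\n    key = os.environ.get(env_var)\n    if key:\n        return key.strip()\n    path = Path(config_path).expanduser()\n    return path.read_text(encoding=\"utf-8\").strip() if path.exists() else None\n\n# Example: find_api_key(\"OPENAI_API_KEY\", \"~/.config/openai/api_key\")"
      },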
      {
        "title": "Run Manually",
        "body": "# Full pipeline\npython3 skills/memory-pipeline/scripts/memory-extract.py\npython3 skills/memory-pipeline/scripts/memory-link.py\npython3 skills/memory-pipeline/scripts/memory-briefing.py"
      },
      {
        "title": "Automate via Heartbeat",
        "body": "Add to your HEARTBEAT.md for daily automatic runs:\n\n### Daily Memory Pipeline\n- **Frequency:** Once per day (morning)\n- **Action:** Run the memory pipeline:\n  1. `python3 skills/memory-pipeline/scripts/memory-extract.py`\n  2. `python3 skills/memory-pipeline/scripts/memory-link.py`\n  3. `python3 skills/memory-pipeline/scripts/memory-briefing.py`"
      },
      {
        "title": "Import External Knowledge",
        "body": "Already have years of conversations in ChatGPT? Import them so your agent knows what you know."
      },
      {
        "title": "ChatGPT Export",
        "body": "# 1. Export from ChatGPT: Settings → Data Controls → Export Data\n# 2. Drop the zip in your workspace\n# 3. Run:\npython3 skills/memory-pipeline/scripts/ingest-chatgpt.py ~/imports/chatgpt-export.zip\n\n# Preview first (recommended):\npython3 skills/memory-pipeline/scripts/ingest-chatgpt.py ~/imports/chatgpt-export.zip --dry-run\n\nWhat it does:\n\nParses ChatGPT's conversation tree format\nFilters out throwaway conversations (configurable: --min-turns, --min-length)\nSupports topic exclusion (edit EXCLUDE_PATTERNS to skip unwanted topics)\nOutputs clean, dated markdown files to memory/knowledge/chatgpt/\nFiles are automatically indexed by OpenClaw's semantic search\n\nOptions:\n\n--dry-run — Preview without writing files\n--keep-all — Skip all filtering\n--min-turns N — Minimum user messages to keep (default: 2)\n--min-length N — Minimum total characters (default: 200)"
      },
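      {
        "title": "Example: Conversation Filter (Sketch)",
        "body": "To make the filtering defaults concrete, a hedged sketch of the keep-or-drop decision using the documented --min-turns and --min-length defaults; keep_conversation is an illustrative helper, not the shipped ingester's API.\n\n# Illustrative filter mirroring the documented defaults -- not the shipped script.\ndef keep_conversation(user_messages, min_turns=2, min_length=200):\n    \"\"\"Drop throwaway chats: too few user turns or too little total text.\"\"\"\n    total_chars = sum(len(m) for m in user_messages)\n    return len(user_messages) >= min_turns and total_chars >= min_length\n\n# By default, chats with fewer than 2 user messages or under 200 total characters are dropped."
      },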
      {
        "title": "Adding Other Sources",
        "body": "The pattern is extensible. Create ingest-<source>.py, parse the format, write markdown to memory/knowledge/<source>/. The indexer handles the rest."
      },
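      {
        "title": "Example: Minimal Ingest Script (Sketch)",
        "body": "A minimal sketch of what an ingest-<source>.py following the pattern above could look like, assuming a folder of plain-text notes; the name ingest-notes.py and the input format are hypothetical, and only the memory/knowledge/<source>/ layout comes from the docs.\n\n#!/usr/bin/env python3\n# Hypothetical ingest-notes.py -- illustrative only, not part of the package.\nimport sys\nfrom datetime import date\nfrom pathlib import Path\n\nOUT_DIR = Path(\"memory/knowledge/notes\")  # documented layout: memory/knowledge/<source>/\n\ndef main(export_dir):\n    OUT_DIR.mkdir(parents=True, exist_ok=True)\n    for src in sorted(Path(export_dir).glob(\"*.txt\")):\n        text = src.read_text(encoding=\"utf-8\").strip()\n        if len(text) < 200:  # skip throwaway notes, mirroring the --min-length default\n            continue\n        out = OUT_DIR / (date.today().isoformat() + \"-\" + src.stem + \".md\")\n        out.write_text(\"# \" + src.stem + \"\\n\\n\" + text + \"\\n\", encoding=\"utf-8\")\n        print(\"wrote\", out)\n\nif __name__ == \"__main__\":\n    main(sys.argv[1])"
      },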
      {
        "title": "Stage 1: Extract",
        "body": "Script: memory-extract.py\n\nReads daily notes (memory/YYYY-MM-DD.md) and session transcripts, then uses an LLM to extract structured facts:\n\n{\"type\": \"decision\", \"content\": \"Use Rust for the backend\", \"subject\": \"Project Architecture\", \"confidence\": 0.9}\n{\"type\": \"preference\", \"content\": \"Prefers Google Drive over Notion\", \"subject\": \"Tools\", \"confidence\": 0.95}\n\nOutput: memory/extracted.jsonl"
      },
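      {
        "title": "Example: Extract Stage Data Flow (Sketch)",
        "body": "A rough sketch of the data flow described above, assuming a call_llm helper that returns one JSON object per line; call_llm is hypothetical, and the real memory-extract.py handles provider selection and prompting, so treat this as an outline rather than the shipped code.\n\n# Illustrative outline of the extract stage -- not the shipped memory-extract.py.\nimport json\nfrom pathlib import Path\n\nPROMPT = \"Return one JSON object per line with type, content, subject, confidence.\"\n\ndef call_llm(prompt):\n    \"\"\"Hypothetical helper: send the prompt to whichever detected LLM key is configured.\"\"\"\n    raise NotImplementedError\n\ndef extract(note_path, out_path=\"memory/extracted.jsonl\"):\n    note = Path(note_path).read_text(encoding=\"utf-8\")\n    raw = call_llm(PROMPT + \"\\n\\n\" + note)\n    with Path(out_path).open(\"a\", encoding=\"utf-8\") as out:  # extracted.jsonl is append-only\n        for line in raw.splitlines():\n            if line.strip():\n                fact = json.loads(line)  # e.g. a decision, preference, or learning\n                print(json.dumps(fact), file=out)"
      },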
      {
        "title": "Stage 2: Link",
        "body": "Script: memory-link.py\n\nTakes extracted facts and builds a knowledge graph:\n\nGenerates embeddings for semantic similarity\nCreates bidirectional links between related facts\nDetects contradictions and marks superseded facts\nAuto-generates domain tags\n\nOutput: memory/knowledge-graph.json + memory/knowledge-summary.md"
      },
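      {
        "title": "Example: Similarity Linking (Sketch)",
        "body": "A small sketch of the linking idea, assuming each fact already carries an embedding vector; the 0.3 threshold matches the documented default, but the data shapes here are illustrative rather than the real knowledge-graph format.\n\n# Illustrative linking pass -- not the shipped memory-link.py.\nimport math\n\nSIMILARITY_THRESHOLD = 0.3  # documented default\n\ndef cosine(a, b):\n    dot = sum(x * y for x, y in zip(a, b))\n    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))\n    return dot / norm if norm else 0.0\n\ndef link(facts):\n    \"\"\"Attach bidirectional 'links' lists to facts whose embeddings are similar enough.\"\"\"\n    for fact in facts:\n        fact.setdefault(\"links\", [])\n    for i, a in enumerate(facts):\n        for j in range(i + 1, len(facts)):\n            if cosine(a[\"embedding\"], facts[j][\"embedding\"]) >= SIMILARITY_THRESHOLD:\n                a[\"links\"].append(j)  # recorded in both directions\n                facts[j][\"links\"].append(i)"
      },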
      {
        "title": "Stage 3: Briefing",
        "body": "Script: memory-briefing.py\n\nGenerates a compact daily briefing (< 2000 chars) combining:\n\nPersonality traits (from SOUL.md)\nUser context (from USER.md)\nActive projects and recent decisions\nOpen todos\n\nOutput: BRIEFING.md (workspace root)"
      },
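      {
        "title": "Example: Briefing Assembly (Sketch)",
        "body": "To make the size budget concrete, a hedged sketch of assembling BRIEFING.md and capping it at the documented 2000 characters; the input file names come from the list above, and the simple truncation is only a stand-in for whatever constraint the real memory-briefing.py applies.\n\n# Illustrative briefing assembly -- not the shipped memory-briefing.py.\nfrom pathlib import Path\n\nMAX_CHARS = 2000  # documented briefing budget\n\ndef read_if_exists(path):\n    p = Path(path)\n    return p.read_text(encoding=\"utf-8\") if p.exists() else \"\"\n\ndef build_briefing(sections):\n    parts = [\"## \" + title + \"\\n\" + body.strip() for title, body in sections.items() if body.strip()]\n    return (\"# BRIEFING\\n\\n\" + \"\\n\\n\".join(parts))[:MAX_CHARS]  # hard cap as a stand-in\n\nif __name__ == \"__main__\":\n    briefing = build_briefing({\n        \"Personality\": read_if_exists(\"SOUL.md\"),\n        \"User\": read_if_exists(\"USER.md\"),\n        \"Recent decisions\": read_if_exists(\"memory/knowledge-summary.md\"),\n    })\n    Path(\"BRIEFING.md\").write_text(briefing, encoding=\"utf-8\")"
      },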
      {
        "title": "Performance Hooks (Optional)",
        "body": "Four lifecycle hooks that enforce execution discipline during sessions. Based on a principle from performance psychology: separate preparation from execution.\n\nUser Message → Agent Loop\n  ├── before_agent_start  →  Briefing packet (memory + checklist)\n  ├── before_tool_call    →  Policy enforcement (deny list)\n  ├── tool_result_persist →  Output compression (prevent context bloat)\n  └── agent_end           →  After-action review (durable notes)"
      },
      {
        "title": "Configuration",
        "body": "{\n  \"enabled\": true,\n  \"briefing\": {\n    \"maxChars\": 6000,\n    \"checklist\": [\n      \"Restate the task in one sentence.\",\n      \"List constraints and success criteria.\",\n      \"Retrieve only the minimum relevant memory.\",\n      \"Prefer tools over guessing when facts matter.\"\n    ],\n    \"memoryFiles\": [\"memory/IDENTITY.md\", \"memory/PROJECTS.md\"]\n  },\n  \"tools\": {\n    \"deny\": [\"dangerous_tool\"],\n    \"maxToolResultChars\": 12000\n  },\n  \"afterAction\": {\n    \"writeMemoryFile\": \"memory/AFTER_ACTION.md\",\n    \"maxBullets\": 8\n  }\n}"
      },
      {
        "title": "Hook Details",
        "body": "HookWhat it doesbefore_agent_startLoads memory files, builds bounded briefing packet, injects into system promptbefore_tool_callChecks tool against deny list, prevents unsafe callstool_result_persistHead (60%) + tail (30%) compression of large resultsagent_endAppends session summary to memory file with tools used and outcomes"
      },
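      {
        "title": "Example: Tool Result Compression (Sketch)",
        "body": "The tool_result_persist behaviour is easiest to picture with a small sketch: keep roughly the first 60% and last 30% of an oversized result and drop the middle. The function below is illustrative only; the 12000 default mirrors the maxToolResultChars value shown in the configuration.\n\n# Illustrative head/tail compression for large tool results -- not the shipped hook.\ndef compress(result, max_chars=12000):\n    \"\"\"Keep ~60% head and ~30% tail of an oversized result, eliding the middle.\"\"\"\n    if len(result) <= max_chars:\n        return result\n    head = result[: int(max_chars * 0.6)]\n    tail = result[-int(max_chars * 0.3):]\n    return head + \"\\n[... middle elided ...]\\n\" + tail"
      },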
      {
        "title": "Output Files",
        "body": "FileLocationPurposeBRIEFING.mdWorkspace rootDaily context cheat sheetextracted.jsonlmemory/All extracted facts (append-only)knowledge-graph.jsonmemory/Full graph with embeddings and linksknowledge-summary.mdmemory/Human-readable graph summaryknowledge/chatgpt/*.mdmemory/Ingested ChatGPT conversations"
      },
      {
        "title": "Customization",
        "body": "Change LLM models — Edit model names in each script (supports OpenAI, Anthropic, Gemini)\nAdjust extraction — Modify the extraction prompt in memory-extract.py to focus on different fact types\nTune link sensitivity — Change the similarity threshold in memory-link.py (default: 0.3)\nFilter ingestion — Edit EXCLUDE_PATTERNS in ingest-chatgpt.py for topic exclusion"
      },
      {
        "title": "Troubleshooting",
        "body": "ProblemFixNo facts extractedCheck that daily notes or transcripts exist; verify API keyLow-quality linksAdd OpenAI key for embedding-based similarity; adjust thresholdBriefing too longReduce facts in template or let LLM generation handle it (auto-constrained to 2000 chars)"
      },
      {
        "title": "See Also",
        "body": "Setup Guide — Detailed installation and configuration"
      }
    ]
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/joe-rlo/memory-pipeline",
    "publisherUrl": "https://clawhub.ai/joe-rlo/memory-pipeline",
    "owner": "joe-rlo",
    "version": "0.4.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/memory-pipeline",
    "downloadUrl": "https://openagent3.xyz/downloads/memory-pipeline",
    "agentUrl": "https://openagent3.xyz/skills/memory-pipeline/agent",
    "manifestUrl": "https://openagent3.xyz/skills/memory-pipeline/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/memory-pipeline/agent.md"
  }
}