{
  "schemaVersion": "1.0",
  "item": {
    "slug": "content-ops",
    "name": "Content Ops",
    "source": "tencent",
    "type": "skill",
    "category": "内容创作",
    "sourceUrl": "https://clawhub.ai/cwyhkyochen-a11y/content-ops",
    "canonicalUrl": "https://clawhub.ai/cwyhkyochen-a11y/content-ops",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/content-ops",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=content-ops",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "AGENT_ONBOARDING.md",
      "DISTRIBUTION.md",
      "IMAGE_GENERATION_SETUP.md",
      "MCP_SERVICES.md",
      "QUICKSTART.md",
      "QUICK_REFERENCE.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=content-ops",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=content-ops",
        "contentDisposition": "attachment; filename=\"content-ops-0.1.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/content-ops"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/content-ops",
    "agentPageUrl": "https://openagent3.xyz/skills/content-ops/agent",
    "manifestUrl": "https://openagent3.xyz/skills/content-ops/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/content-ops/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Content Ops System",
        "body": "社交媒体内容运营自动化系统，使用 SQLite + Drizzle ORM 存储数据，支持小红书、Reddit、Pinterest、Discord 等平台的内容抓取、策划、发布和数据分析。"
      },
      {
        "title": "📋 目录",
        "body": "初始化部署\n测试任务\n正式任务\n工作流详解\n参考文档"
      },
      {
        "title": "1.1 基础环境",
        "body": "Node.js 依赖\n\ncd /home/admin/.openclaw/workspace/skills/content-ops\n\n# 安装依赖\nnpm install\n\n# 生成并执行数据库迁移\nnpx drizzle-kit generate\nnpx drizzle-kit migrate\n\nPython 依赖（可选，用于增强功能）\n\n# 如果需要使用 xiaohongshutools skill\npip install aiohttp loguru pycryptodome getuseragent requests"
      },
      {
        "title": "1.2 MCP 服务部署",
        "body": "小红书 MCP (xpzouying/xiaohongshu-mcp)\n\n下载部署：\n\ncd ~/.openclaw/workspace/bin\n\n# 下载二进制文件\nwget https://github.com/xpzouying/xiaohongshu-mcp/releases/download/v2026.02.28.1720-8a7fe21/xiaohongshu-mcp-linux-amd64.tar.gz\ntar -xzf xiaohongshu-mcp-linux-amd64.tar.gz\n\n# 登录（首次，扫码）\n./xiaohongshu-login\n\n# 启动服务（后台运行）\nscreen -dmS xhs-mcp ./xiaohongshu-mcp -headless=true\n\n服务信息：\n\n端口：18060\n端点：http://localhost:18060\nCookie 文件：~/.openclaw/workspace/bin/cookies.json\n\n验证服务：\n\ncurl http://localhost:18060/api/v1/login/status"
      },
      {
        "title": "1.3 数据库初始化",
        "body": "自动创建的数据表：\n\n表名用途核心字段target_accounts被运营账号（Reddit等）platform, api_config, positioningsource_accounts信息源账号（小红书等）login_status, daily_quotacrawl_tasks抓取任务status, query_list, target_countcrawl_results抓取结果source_url, content, quality_scorepublish_tasks发布任务status, content, scheduled_atpublish_metrics_daily发布内容每日数据metric_date, reddit_scoretarget_accounts_metrics_daily账号整体每日数据followers_change, engagement_rate\n\n数据库位置：\n\n~/.openclaw/workspace/content-ops-workspace/data/content-ops.db"
      },
      {
        "title": "1.4 账号配置",
        "body": "添加小红书信息源账号\n\nnpx tsx scripts/add-xhs-account.ts\n\n添加 Reddit 目标账号\n\nnpx tsx scripts/add-reddit-account.ts"
      },
      {
        "title": "2.1 测试小红书抓取（无需登录）",
        "body": "# 测试搜索\ncurl -X POST http://localhost:18060/api/v1/feeds/search \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\"keyword\": \"AI人工智能\", \"filters\": {\"sort_by\": \"最多点赞\"}}'"
      },
      {
        "title": "2.2 测试 MCP 服务状态",
        "body": "# 检查登录状态\ncurl http://localhost:18060/api/v1/login/status\n\n# 预期返回：\n# {\"success\": true, \"data\": {\"is_logged_in\": true, \"username\": \"xxx\"}}"
      },
      {
        "title": "2.3 测试数据库连接",
        "body": "# 查看数据概览\nnpx tsx scripts/show-overview.ts"
      },
      {
        "title": "2.4 完整测试流程",
        "body": "# 1. 创建测试抓取任务\nnpx tsx scripts/create-crawl-task.ts --keyword \"AI教程\" --count 5\n\n# 2. 执行抓取\nnpx tsx scripts/execute-crawl.ts --task-id <task-id>\n\n# 3. 查看结果\nnpx tsx scripts/show-crawl-results.ts --task-id <task-id>\n\n# 4. 审核（测试用：全部通过）\nnpx tsx scripts/approve-all.ts --task-id <task-id>"
      },
      {
        "title": "3.1 内容抓取 Workflow",
        "body": "Step 1: 创建抓取任务\n\nnpx tsx scripts/create-crawl-task.ts \\\n  --platform xiaohongshu \\\n  --keywords \"AI人工智能,ChatGPT,AI工具\" \\\n  --sort-by \"最多点赞\" \\\n  --target-count 50\n\nStep 2: 查看待审核列表\n\nnpx tsx scripts/show-crawl-results.ts --task-id <task-id>\n\nStep 3: 人工审核\n\n# 通过指定序号\nnpx tsx scripts/approve-items.ts --task-id <task-id> --items 1,2,3,5\n\n# 或全部通过\nnpx tsx scripts/approve-all.ts --task-id <task-id>\n\nStep 4: 补充详情（可选）\n\n# 查看需要补充详情的列表\nnpx tsx scripts/show-pending-details.ts\n\n# 用户提供详情后导入\nnpx tsx scripts/import-manual-detail.ts --input /tmp/manual_details.txt"
      },
      {
        "title": "3.2 内容发布 Workflow",
        "body": "Step 1: 选择语料创建发布任务\n\nnpx tsx scripts/create-publish-task.ts \\\n  --source-ids <note-id-1>,<note-id-2> \\\n  --target-platform reddit \\\n  --target-account <account-id>\n\nStep 2: 生成内容（AI redesign）\n\nnpx tsx scripts/generate-content.ts --task-id <publish-task-id>\n\nStep 3: 审核发布内容\n\nnpx tsx scripts/review-publish-content.ts --task-id <publish-task-id>\n\nStep 4: 执行发布\n\nnpx tsx scripts/execute-publish.ts --task-id <publish-task-id>"
      },
      {
        "title": "3.3 数据复盘 Workflow",
        "body": "# 抓取昨日数据\nnpx tsx scripts/fetch-metrics.ts --date yesterday\n\n# 生成数据报告\nnpx tsx scripts/generate-report.ts --period 7d"
      },
      {
        "title": "4.1 内容抓取流程",
        "body": "用户确认主题\n    ↓\n创建抓取任务 (crawl_tasks)\n    ↓\n调用 /api/v1/feeds/search 获取列表\n    ↓\n保存结果到 crawl_results (标题、互动数据)\n    ↓\n通知人工确认\n    ↓\n审核通过 → 标记为可用 (curation_status='approved')\n    ↓\n（可选）人工补充详情正文\n\n⚠️ 抓取限制说明：\n\n小红书网页端有严格的反爬机制：\n\n搜索列表 ✅ 可用\n\n可获取：标题、作者、互动数据（点赞/收藏/评论数）、封面图\n可识别：内容类型（video/normal）\n\n\n\n详情接口 ❌ 受限\n\n多数笔记返回 \"笔记不可访问\" 或空数据\n无法获取：完整正文、评论列表\n原因：小红书 App-only 内容限制"
      },
      {
        "title": "4.2 人工辅助详情导入",
        "body": "当自动抓取无法获取详情时，支持人工补充：\n\n查看待补充列表：\n\nnpx tsx scripts/show-pending-details.ts\n\n用户提供详情格式：\n\n详情 1\n[复制粘贴第一篇笔记的正文内容]\n---\n详情 3\n[复制粘贴第三篇笔记的正文内容]\n---\n\n导入到数据库：\n\nnpx tsx scripts/import-manual-detail.ts --input /tmp/manual_details.txt\n\n数据会同时保存到：\n\ncrawl_results 表的 content 字段\ncorpus/manual/ 目录的 JSON 文件"
      },
      {
        "title": "4.3 内容发布流程",
        "body": "选择可用语料 (crawl_results)\n    ↓\n创建发布任务 (publish_tasks) - status='draft'\n    ↓\nAI 基于语料生成内容 → status='pending_review'\n    ↓\n人工审核 → status='approved'\n    ↓\n定时发布 → status='scheduled' → 'published'\n    ↓\n每日抓取数据 (publish_metrics_daily)"
      },
      {
        "title": "五、参考文档",
        "body": "文档说明给谁看使用流程手册完整操作流程，从安装到日常运营👤 用户必看快速上手指南10分钟快速启动👤 新用户数据库表结构完整表结构🤖 开发者详细工序设计多Agent协作流程🤖 开发者"
      },
      {
        "title": "常用查询",
        "body": "首页看板数据：\n\nconst stats = await queries.getOverviewStats();\n// {\n//   activeAccounts: 5,\n//   todayScheduledTasks: 3,\n//   pendingCorpus: 20,\n//   availableCorpus: 150,\n//   weeklyPublished: 21\n// }\n\n账号7天趋势：\n\nconst trend = await queries.getAccountTrend(accountId, 7);\n\n内容表现排行：\n\nconst topContent = await queries.getTopPerformingContent(accountId, 30, 10);"
      },
      {
        "title": "数据库备份",
        "body": "# 复制文件即可备份\ncp ~/.openclaw/workspace/content-ops-workspace/data/content-ops.db \\\n   ~/.openclaw/workspace/content-ops-workspace/data/backup-$(date +%Y%m%d).db"
      },
      {
        "title": "目录结构",
        "body": "~/.openclaw/workspace/content-ops-workspace/\n├── data/\n│   └── content-ops.db          # SQLite 数据库文件\n├── accounts/                    # Markdown 账号档案\n├── strategies/                  # 运营策略文档\n├── corpus/\n│   ├── raw/                    # 原始抓取语料\n│   ├── manual/                 # 人工导入语料\n│   └── published/              # 已发布内容\n└── reports/                    # 数据报告"
      },
      {
        "title": "部署前检查",
        "body": "Node.js 依赖安装完成 (npm install)\n 数据库迁移执行完成 (npx drizzle-kit migrate)\n 小红书 MCP 服务运行中 (curl http://localhost:18060/api/v1/login/status)\n Cookie 文件存在 (~/.openclaw/workspace/bin/cookies.json)"
      },
      {
        "title": "测试任务检查",
        "body": "MCP 登录状态正常\n 测试搜索能返回结果\n 数据库能写入数据\n 审核流程正常"
      },
      {
        "title": "正式任务检查",
        "body": "源账号已添加 (source_accounts)\n 目标账号已添加 (target_accounts)\n 抓取任务创建成功\n 发布任务能正常生成内容"
      }
    ],
    "body": "Content Ops System\n\n社交媒体内容运营自动化系统，使用 SQLite + Drizzle ORM 存储数据，支持小红书、Reddit、Pinterest、Discord 等平台的内容抓取、策划、发布和数据分析。\n\n📋 目录\n初始化部署\n测试任务\n正式任务\n工作流详解\n参考文档\n一、初始化部署\n1.1 基础环境\nNode.js 依赖\ncd /home/admin/.openclaw/workspace/skills/content-ops\n\n# 安装依赖\nnpm install\n\n# 生成并执行数据库迁移\nnpx drizzle-kit generate\nnpx drizzle-kit migrate\n\nPython 依赖（可选，用于增强功能）\n# 如果需要使用 xiaohongshutools skill\npip install aiohttp loguru pycryptodome getuseragent requests\n\n1.2 MCP 服务部署\n小红书 MCP (xpzouying/xiaohongshu-mcp)\n\n下载部署：\n\ncd ~/.openclaw/workspace/bin\n\n# 下载二进制文件\nwget https://github.com/xpzouying/xiaohongshu-mcp/releases/download/v2026.02.28.1720-8a7fe21/xiaohongshu-mcp-linux-amd64.tar.gz\ntar -xzf xiaohongshu-mcp-linux-amd64.tar.gz\n\n# 登录（首次，扫码）\n./xiaohongshu-login\n\n# 启动服务（后台运行）\nscreen -dmS xhs-mcp ./xiaohongshu-mcp -headless=true\n\n\n服务信息：\n\n端口：18060\n端点：http://localhost:18060\nCookie 文件：~/.openclaw/workspace/bin/cookies.json\n\n验证服务：\n\ncurl http://localhost:18060/api/v1/login/status\n\n1.3 数据库初始化\n\n自动创建的数据表：\n\n表名\t用途\t核心字段\ntarget_accounts\t被运营账号（Reddit等）\tplatform, api_config, positioning\nsource_accounts\t信息源账号（小红书等）\tlogin_status, daily_quota\ncrawl_tasks\t抓取任务\tstatus, query_list, target_count\ncrawl_results\t抓取结果\tsource_url, content, quality_score\npublish_tasks\t发布任务\tstatus, content, scheduled_at\npublish_metrics_daily\t发布内容每日数据\tmetric_date, reddit_score\ntarget_accounts_metrics_daily\t账号整体每日数据\tfollowers_change, engagement_rate\n\n数据库位置：\n\n~/.openclaw/workspace/content-ops-workspace/data/content-ops.db\n\n1.4 账号配置\n添加小红书信息源账号\nnpx tsx scripts/add-xhs-account.ts\n\n添加 Reddit 目标账号\nnpx tsx scripts/add-reddit-account.ts\n\n二、测试任务\n2.1 测试小红书抓取（无需登录）\n# 测试搜索\ncurl -X POST http://localhost:18060/api/v1/feeds/search \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\"keyword\": \"AI人工智能\", \"filters\": {\"sort_by\": \"最多点赞\"}}'\n\n2.2 测试 MCP 服务状态\n# 检查登录状态\ncurl http://localhost:18060/api/v1/login/status\n\n# 预期返回：\n# {\"success\": true, \"data\": {\"is_logged_in\": true, \"username\": \"xxx\"}}\n\n2.3 测试数据库连接\n# 查看数据概览\nnpx tsx scripts/show-overview.ts\n\n2.4 完整测试流程\n# 1. 创建测试抓取任务\nnpx tsx scripts/create-crawl-task.ts --keyword \"AI教程\" --count 5\n\n# 2. 执行抓取\nnpx tsx scripts/execute-crawl.ts --task-id <task-id>\n\n# 3. 查看结果\nnpx tsx scripts/show-crawl-results.ts --task-id <task-id>\n\n# 4. 审核（测试用：全部通过）\nnpx tsx scripts/approve-all.ts --task-id <task-id>\n\n三、正式任务\n3.1 内容抓取 Workflow\n\nStep 1: 创建抓取任务\n\nnpx tsx scripts/create-crawl-task.ts \\\n  --platform xiaohongshu \\\n  --keywords \"AI人工智能,ChatGPT,AI工具\" \\\n  --sort-by \"最多点赞\" \\\n  --target-count 50\n\n\nStep 2: 查看待审核列表\n\nnpx tsx scripts/show-crawl-results.ts --task-id <task-id>\n\n\nStep 3: 人工审核\n\n# 通过指定序号\nnpx tsx scripts/approve-items.ts --task-id <task-id> --items 1,2,3,5\n\n# 或全部通过\nnpx tsx scripts/approve-all.ts --task-id <task-id>\n\n\nStep 4: 补充详情（可选）\n\n# 查看需要补充详情的列表\nnpx tsx scripts/show-pending-details.ts\n\n# 用户提供详情后导入\nnpx tsx scripts/import-manual-detail.ts --input /tmp/manual_details.txt\n\n3.2 内容发布 Workflow\n\nStep 1: 选择语料创建发布任务\n\nnpx tsx scripts/create-publish-task.ts \\\n  --source-ids <note-id-1>,<note-id-2> \\\n  --target-platform reddit \\\n  --target-account <account-id>\n\n\nStep 2: 生成内容（AI redesign）\n\nnpx tsx scripts/generate-content.ts --task-id <publish-task-id>\n\n\nStep 3: 审核发布内容\n\nnpx tsx scripts/review-publish-content.ts --task-id <publish-task-id>\n\n\nStep 4: 执行发布\n\nnpx tsx scripts/execute-publish.ts --task-id <publish-task-id>\n\n3.3 数据复盘 Workflow\n# 抓取昨日数据\nnpx tsx scripts/fetch-metrics.ts --date yesterday\n\n# 生成数据报告\nnpx tsx scripts/generate-report.ts --period 7d\n\n四、工作流详解\n4.1 内容抓取流程\n用户确认主题\n    ↓\n创建抓取任务 (crawl_tasks)\n    ↓\n调用 /api/v1/feeds/search 获取列表\n    ↓\n保存结果到 crawl_results (标题、互动数据)\n    ↓\n通知人工确认\n    ↓\n审核通过 → 标记为可用 (curation_status='approved')\n    ↓\n（可选）人工补充详情正文\n\n\n⚠️ 抓取限制说明：\n\n小红书网页端有严格的反爬机制：\n\n搜索列表 ✅ 可用\n\n可获取：标题、作者、互动数据（点赞/收藏/评论数）、封面图\n可识别：内容类型（video/normal）\n\n详情接口 ❌ 受限\n\n多数笔记返回 \"笔记不可访问\" 或空数据\n无法获取：完整正文、评论列表\n原因：小红书 App-only 内容限制\n4.2 人工辅助详情导入\n\n当自动抓取无法获取详情时，支持人工补充：\n\n查看待补充列表：\n\nnpx tsx scripts/show-pending-details.ts\n\n\n用户提供详情格式：\n\n详情 1\n[复制粘贴第一篇笔记的正文内容]\n---\n详情 3\n[复制粘贴第三篇笔记的正文内容]\n---\n\n\n导入到数据库：\n\nnpx tsx scripts/import-manual-detail.ts --input /tmp/manual_details.txt\n\n\n数据会同时保存到：\n\ncrawl_results 表的 content 字段\ncorpus/manual/ 目录的 JSON 文件\n4.3 内容发布流程\n选择可用语料 (crawl_results)\n    ↓\n创建发布任务 (publish_tasks) - status='draft'\n    ↓\nAI 基于语料生成内容 → status='pending_review'\n    ↓\n人工审核 → status='approved'\n    ↓\n定时发布 → status='scheduled' → 'published'\n    ↓\n每日抓取数据 (publish_metrics_daily)\n\n五、参考文档\n文档\t说明\t给谁看\n使用流程手册\t完整操作流程，从安装到日常运营\t👤 用户必看\n快速上手指南\t10分钟快速启动\t👤 新用户\n数据库表结构\t完整表结构\t🤖 开发者\n详细工序设计\t多Agent协作流程\t🤖 开发者\n常用查询\n\n首页看板数据：\n\nconst stats = await queries.getOverviewStats();\n// {\n//   activeAccounts: 5,\n//   todayScheduledTasks: 3,\n//   pendingCorpus: 20,\n//   availableCorpus: 150,\n//   weeklyPublished: 21\n// }\n\n\n账号7天趋势：\n\nconst trend = await queries.getAccountTrend(accountId, 7);\n\n\n内容表现排行：\n\nconst topContent = await queries.getTopPerformingContent(accountId, 30, 10);\n\n数据库备份\n# 复制文件即可备份\ncp ~/.openclaw/workspace/content-ops-workspace/data/content-ops.db \\\n   ~/.openclaw/workspace/content-ops-workspace/data/backup-$(date +%Y%m%d).db\n\n目录结构\n~/.openclaw/workspace/content-ops-workspace/\n├── data/\n│   └── content-ops.db          # SQLite 数据库文件\n├── accounts/                    # Markdown 账号档案\n├── strategies/                  # 运营策略文档\n├── corpus/\n│   ├── raw/                    # 原始抓取语料\n│   ├── manual/                 # 人工导入语料\n│   └── published/              # 已发布内容\n└── reports/                    # 数据报告\n\n快速检查清单\n部署前检查\n Node.js 依赖安装完成 (npm install)\n 数据库迁移执行完成 (npx drizzle-kit migrate)\n 小红书 MCP 服务运行中 (curl http://localhost:18060/api/v1/login/status)\n Cookie 文件存在 (~/.openclaw/workspace/bin/cookies.json)\n测试任务检查\n MCP 登录状态正常\n 测试搜索能返回结果\n 数据库能写入数据\n 审核流程正常\n正式任务检查\n 源账号已添加 (source_accounts)\n 目标账号已添加 (target_accounts)\n 抓取任务创建成功\n 发布任务能正常生成内容"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/cwyhkyochen-a11y/content-ops",
    "publisherUrl": "https://clawhub.ai/cwyhkyochen-a11y/content-ops",
    "owner": "cwyhkyochen-a11y",
    "version": "0.1.1",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/content-ops",
    "downloadUrl": "https://openagent3.xyz/downloads/content-ops",
    "agentUrl": "https://openagent3.xyz/skills/content-ops/agent",
    "manifestUrl": "https://openagent3.xyz/skills/content-ops/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/content-ops/agent.md"
  }
}