{
  "schemaVersion": "1.0",
  "item": {
    "slug": "b3ehive",
    "name": "B3ehive",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/weiyangzen/b3ehive",
    "canonicalUrl": "https://clawhub.ai/weiyangzen/b3ehive",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/b3ehive",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=b3ehive",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md",
      "config.yaml",
      "package.json",
      "scripts/phase1_spawn.sh",
      "scripts/phase2_evaluate.sh"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-23T16:43:11.935Z",
      "expiresAt": "2026-04-30T16:43:11.935Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=4claw-imageboard",
        "contentDisposition": "attachment; filename=\"4claw-imageboard-1.0.1.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/b3ehive"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/b3ehive",
    "agentPageUrl": "https://openagent3.xyz/skills/b3ehive/agent",
    "manifestUrl": "https://openagent3.xyz/skills/b3ehive/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/b3ehive/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "1. Purpose (PCTF: Purpose)",
        "body": "Enable competitive code generation where three isolated AI agents implement the same functionality, evaluate each other objectively, and deliver the optimal solution through data-driven selection."
      },
      {
        "title": "Input",
        "body": "task_description: String describing the coding task\nconstraints: Optional constraints (time/space complexity, language, etc.)"
      },
      {
        "title": "Output",
        "body": "final_solution: Directory containing the winning implementation\ncomparison_report: Markdown analysis of all three approaches\ndecision_rationale: Explanation of why the winner was selected"
      },
      {
        "title": "Success Criteria",
        "body": "assertions:\n  - final_solution/implementation exists and is runnable\n  - comparison_report.md exists with objective metrics\n  - decision_rationale.md explains selection logic\n  - all three agent implementations are documented\n  - evaluation scores are numeric and justified"
      },
      {
        "title": "3. Chain Flow (PCTF: Chain)",
        "body": "graph TD\n    A[User Task] --> B[Phase 1: Parallel Spawn]\n    B --> C[Agent A: Simplicity]\n    B --> D[Agent B: Speed]\n    B --> E[Agent C: Robustness]\n    C --> F[Phase 2: Cross-Evaluation]\n    D --> F\n    E --> F\n    F --> G[6 Evaluation Reports]\n    G --> H[Phase 3: Self-Scoring]\n    H --> I[3 Scorecards]\n    I --> J[Phase 4: Final Delivery]\n    J --> K[Best Solution]"
      },
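      {
        "title": "Chain Flow Sketch (Illustrative)",
        "body": "A minimal Python sketch of how the four phases chain together; the phase callables are assumptions supplied by the caller, not part of the packaged scripts:\n\n# Illustrative orchestration only; run_agent, cross_evaluate, self_score,\n# and select_winner are hypothetical callables supplied by the caller.\nFOCUSES = {\"a\": \"simplicity\", \"b\": \"speed\", \"c\": \"robustness\"}\n\ndef run_chain(task, run_agent, cross_evaluate, self_score, select_winner):\n    # Phase 1: three isolated implementations, one per focus\n    runs = {name: run_agent(task, focus) for name, focus in FOCUSES.items()}\n    # Phase 2: each agent evaluates the other two (3 x 2 = 6 reports)\n    reports = [cross_evaluate(a, b) for a in runs for b in runs if a != b]\n    # Phase 3: one scorecard per agent, informed by the evaluations\n    scorecards = {name: self_score(name, reports) for name in runs}\n    # Phase 4: data-driven selection of the winning implementation\n    return select_winner(scorecards)"
      },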
      {
        "title": "Phase 1: Parallel Implementation",
        "body": "Agent Prompt Template:\n\nrole: \"Expert Software Engineer\"\nfocus: \"{{agent_focus}}\"  # Simplicity / Speed / Robustness\ntask: \"{{task_description}}\"\nconstraints:\n  - Complete runnable code in implementation/\n  - Checklist.md with ALL items checked\n  - SUMMARY.md with competitive advantages\n  - Must differ from other agents' approaches\n\nlinter_rules:\n  - code_compiles: true\n  - tests_pass: true\n  - no_todos: true\n  - documented: true\n\nassertions:\n  - implementation/main.* exists\n  - tests exist and pass\n  - Checklist.md is complete\n  - SUMMARY.md explains unique approach"
      },
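      {
        "title": "Phase 1 Template Rendering (Illustrative)",
        "body": "A minimal sketch of filling the {{...}} placeholders in the agent prompt template, assuming plain text substitution; the actual spawn mechanics live in scripts/phase1_spawn.sh:\n\n# Stand-in one-line template; the real template is the YAML block above.\nTEMPLATE = \"focus: {{agent_focus}}; task: {{task_description}}\"\n\ndef render_prompt(template, agent_focus, task_description):\n    # Substitute the two placeholders used by the Phase 1 template\n    return (template\n            .replace(\"{{agent_focus}}\", agent_focus)\n            .replace(\"{{task_description}}\", task_description))\n\n# One prompt per competing focus\nprompts = [render_prompt(TEMPLATE, focus, \"Implement quicksort\")\n           for focus in (\"Simplicity\", \"Speed\", \"Robustness\")]"
      },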
      {
        "title": "Phase 2: Cross-Evaluation",
        "body": "Evaluation Prompt Template:\n\nevaluator: \"Agent {{from}}\"\ntarget: \"Agent {{to}}\"\ntask: \"Objectively prove your solution is superior\"\n\ndimensions:\n  simplicity:\n    weight: 20\n    metrics:\n      - lines_of_code: count\n      - cyclomatic_complexity: calculate\n      - readability_score: 1-10\n  \n  speed:\n    weight: 25\n    metrics:\n      - time_complexity: big_o\n      - space_complexity: big_o\n      - benchmark_results: run_if_possible\n  \n  stability:\n    weight: 25\n    metrics:\n      - error_handling_coverage: percentage\n      - resource_cleanup: check\n      - fault_tolerance: test\n  \n  corner_cases:\n    weight: 20\n    metrics:\n      - input_validation: comprehensive\n      - boundary_conditions: covered\n      - edge_cases: tested\n  \n  maintainability:\n    weight: 10\n    metrics:\n      - documentation_quality: 1-10\n      - code_structure: logical\n      - extensibility: easy/hard\n\nassertions:\n  - evaluation is objective with data\n  - specific code snippets cited\n  - numeric scores provided\n  - persuasion argument is data-driven"
      },
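      {
        "title": "Phase 2 Weighted Scoring (Illustrative)",
        "body": "A minimal sketch of how the five weighted dimensions combine into a single 0-100 total, assuming each dimension is first normalized to 0.0-1.0 (that normalization step is an assumption; the template above only fixes the weights):\n\n# Weights mirror the dimensions above and sum to 100.\nWEIGHTS = {\"simplicity\": 20, \"speed\": 25, \"stability\": 25,\n           \"corner_cases\": 20, \"maintainability\": 10}\n\ndef weighted_total(normalized):\n    # normalized maps each dimension to an assumed 0.0-1.0 value\n    assert set(normalized) == set(WEIGHTS), \"score every dimension\"\n    return sum(WEIGHTS[d] * normalized[d] for d in WEIGHTS)\n\n# Example: 0.8 on every dimension yields 80 of 100 points\nscore = weighted_total({d: 0.8 for d in WEIGHTS})"
      },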
      {
        "title": "Phase 3: Objective Scoring",
        "body": "Scoring Prompt Template:\n\nagent: \"Agent {{name}}\"\ntask: \"Fairly score yourself and competitors\"\n\nself_evaluation:\n  - dimension: simplicity\n    max: 20\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: speed\n    max: 25\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: stability\n    max: 25\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: corner_cases\n    max: 20\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: maintainability\n    max: 10\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n\npeer_evaluation:\n  - target: \"Agent {{other}}\"\n    scores: \"{{numeric_scores}}\"\n    comparison: \"{{objective_comparison}}\"\n\nfinal_conclusion:\n  best_implementation: \"[A/B/C/Mixed]\"\n  reasoning: \"{{data_driven_justification}}\"\n  recommendation: \"{{delivery_strategy}}\"\n\nassertions:\n  - all scores are numeric\n  - justifications are specific\n  - no inflation or bias\n  - conclusion is evidence-based"
      },
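      {
        "title": "Phase 3 Scorecard Aggregation (Illustrative)",
        "body": "A minimal sketch reducing the three scorecards to a ranking, assuming each scorecard yields one numeric total per target (one self-score plus two peer scores per implementation); averaging those three views is an assumption, not a rule stated by the template:\n\ndef rank_agents(scorecards):\n    # scorecards: {scorer: {target: numeric_total}} from the 3 scorecards\n    totals = {}\n    for row in scorecards.values():\n        for target, score in row.items():\n            totals.setdefault(target, []).append(score)\n    # Average the self-score and two peer scores per implementation\n    ranked = [(sum(v) / len(v), t) for t, v in totals.items()]\n    return sorted(ranked, reverse=True)"
      },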
      {
        "title": "Phase 4: Final Delivery",
        "body": "Decision Logic:\n\ndef select_winner(scores):\n    \"\"\"\n    Select final solution based on competitive scores\n    \"\"\"\n    margins = calculate_score_margins(scores)\n    \n    if margins.winner - margins.second > 15:\n        # Clear winner\n        return SingleWinner(scores.winner)\n    elif margins.winner - margins.second > 5:\n        # Close competition, consider hybrid\n        return HybridSolution(scores.top_two)\n    else:\n        # Very close, pick simplest\n        return SimplestImplementation(scores.all)\n\nassertions:\n  - final_solution is runnable\n  - comparison_report explains all approaches\n  - decision_rationale is transparent\n  - attribution is given to winning agent"
      },
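      {
        "title": "Phase 4 Selection Sketch (Illustrative)",
        "body": "A runnable version of the margin rule in the decision logic above, assuming a best-first list of (total, agent) pairs such as rank_agents() produces; the 15 and 5 point thresholds mirror the pseudocode and delivery.threshold in the configuration:\n\ndef select_delivery(ranked):\n    # ranked: [(total, agent), ...] sorted best-first\n    (best, winner), (second, runner_up) = ranked[0], ranked[1]\n    margin = best - second\n    if margin > 15:\n        return (\"best\", winner)                  # clear winner\n    if margin > 5:\n        return (\"hybrid\", (winner, runner_up))   # close: consider a hybrid\n    return (\"simplest\", [a for _, a in ranked])  # very close: pick simplest\n\n# Example: a 22-point margin selects agent a outright\nassert select_delivery([(82.0, \"a\"), (60.0, \"b\"), (55.0, \"c\")]) == (\"best\", \"a\")"
      },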
      {
        "title": "Directory Structure",
        "body": "workspace/\n├── run_a/\n│   ├── implementation/      # Agent A code\n│   ├── Checklist.md         # Completion checklist\n│   ├── SUMMARY.md           # Approach summary\n│   ├── evaluation/          # Evaluations of B, C\n│   └── SCORECARD.md         # Self-scoring\n├── run_b/                   # Same structure\n├── run_c/                   # Same structure\n├── final/                   # Winning solution\n├── COMPARISON_REPORT.md     # Full analysis\n└── DECISION_RATIONALE.md    # Why winner selected"
      },
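      {
        "title": "Workspace Scaffolding (Illustrative)",
        "body": "A minimal scaffolding sketch for the workspace layout above; the directory names come from the spec, while the helper itself is illustrative:\n\nfrom pathlib import Path\n\ndef scaffold(root=\"workspace\"):\n    # One run directory per agent, plus the shared final/ output\n    for run in (\"run_a\", \"run_b\", \"run_c\"):\n        (Path(root) / run / \"implementation\").mkdir(parents=True, exist_ok=True)\n        (Path(root) / run / \"evaluation\").mkdir(parents=True, exist_ok=True)\n    (Path(root) / \"final\").mkdir(parents=True, exist_ok=True)"
      },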
      {
        "title": "File Formats",
        "body": "Checklist.md: Markdown with - [x] checkboxes\nSUMMARY.md: Markdown with sections\nEVALUATION_*.md: Markdown with tables\nSCORECARD.md: Markdown with score tables\nImplementation: Runnable code files"
      },
      {
        "title": "Pre-commit Checks",
        "body": "#!/bin/bash\n# scripts/lint.sh\n\nlint_agent_output() {\n    local agent_dir=\"$1\"\n    local errors=0\n    \n    # Check required files exist\n    for file in Checklist.md SUMMARY.md implementation/main.*; do\n        if [[ ! -f \"${agent_dir}/${file}\" ]]; then\n            echo \"ERROR: Missing ${file}\"\n            ((errors++))\n        fi\n    done\n    \n    # Check Checklist is complete\n    if grep -q \"\\[ \\]\" \"${agent_dir}/Checklist.md\"; then\n        echo \"ERROR: Checklist has unchecked items\"\n        ((errors++))\n    fi\n    \n    # Check code compiles (language-specific)\n    # ... implementation-specific checks\n    \n    return $errors\n}\n\n# Run on all agents\nfor agent in a b c; do\n    lint_agent_output \"workspace/run_${agent}\" || exit 1\ndone"
      },
      {
        "title": "Runtime Assertions",
        "body": "def assert_phase_complete(phase_name):\n    \"\"\"Assert that a phase has completed successfully\"\"\"\n    assertions = {\n        \"phase1\": [\n            \"workspace/run_a/implementation exists\",\n            \"workspace/run_b/implementation exists\", \n            \"workspace/run_c/implementation exists\",\n            \"All Checklist.md are complete\"\n        ],\n        \"phase2\": [\n            \"6 evaluation reports exist\",\n            \"All evaluations have numeric scores\"\n        ],\n        \"phase3\": [\n            \"3 scorecards exist\",\n            \"All scores are numeric\",\n            \"Conclusions are provided\"\n        ],\n        \"phase4\": [\n            \"final/solution exists\",\n            \"COMPARISON_REPORT.md exists\",\n            \"DECISION_RATIONALE.md exists\"\n        ]\n    }\n    \n    for assertion in assertions[phase_name]:\n        assert evaluate(assertion), f\"Assertion failed: {assertion}\""
      },
      {
        "title": "6. Configuration",
        "body": "b3ehive:\n  # Agent configuration\n  agents:\n    count: 3\n    model: openai-proxy/gpt-5.3-codex\n    thinking: high\n    focuses:\n      - simplicity\n      - speed\n      - robustness\n  \n  # Evaluation weights (must sum to 100)\n  evaluation:\n    dimensions:\n      simplicity: 20\n      speed: 25\n      stability: 25\n      corner_cases: 20\n      maintainability: 10\n  \n  # Delivery strategy\n  delivery:\n    strategy: auto  # auto / best / hybrid\n    threshold: 15   # Point margin for clear winner\n  \n  # Quality gates\n  quality:\n    lint: true\n    test: true\n    coverage_threshold: 80"
      },
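      {
        "title": "Configuration Validation (Illustrative)",
        "body": "A minimal load-time check for the configuration above, assuming it is stored as the config.yaml shipped in the package and that PyYAML is available (both are assumptions):\n\nimport yaml  # PyYAML; an assumption, the package may not declare it\n\ndef load_config(path=\"config.yaml\"):\n    with open(path) as f:\n        cfg = yaml.safe_load(f)[\"b3ehive\"]\n    weights = cfg[\"evaluation\"][\"dimensions\"]\n    # The spec requires the five dimension weights to sum to exactly 100\n    assert sum(weights.values()) == 100, \"evaluation weights must sum to 100\"\n    return cfg"
      },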
      {
        "title": "7. Usage",
        "body": "# Basic usage\nb3ehive \"Implement a thread-safe rate limiter\"\n\n# With constraints\nb3ehive \"Implement quicksort\" --lang python --max-lines 50\n\n# Using OpenClaw CLI\nopenclaw skills run b3ehive --task \"Your task\""
      },
      {
        "title": "8. License",
        "body": "MIT © Weiyang (@weiyangzen)"
      }
    ],
    "body": "b3ehive Skill Specification\nPCTF-Compliant Multi-Agent Competition System\n1. Purpose (PCTF: Purpose)\n\nEnable competitive code generation where three isolated AI agents implement the same functionality, evaluate each other objectively, and deliver the optimal solution through data-driven selection.\n\n2. Task Definition (PCTF: Task)\nInput\ntask_description: String describing the coding task\nconstraints: Optional constraints (time/space complexity, language, etc.)\nOutput\nfinal_solution: Directory containing the winning implementation\ncomparison_report: Markdown analysis of all three approaches\ndecision_rationale: Explanation of why the winner was selected\nSuccess Criteria\nassertions:\n  - final_solution/implementation exists and is runnable\n  - comparison_report.md exists with objective metrics\n  - decision_rationale.md explains selection logic\n  - all three agent implementations are documented\n  - evaluation scores are numeric and justified\n\n3. Chain Flow (PCTF: Chain)\ngraph TD\n    A[User Task] --> B[Phase 1: Parallel Spawn]\n    B --> C[Agent A: Simplicity]\n    B --> D[Agent B: Speed]\n    B --> E[Agent C: Robustness]\n    C --> F[Phase 2: Cross-Evaluation]\n    D --> F\n    E --> F\n    F --> G[6 Evaluation Reports]\n    G --> H[Phase 3: Self-Scoring]\n    H --> I[3 Scorecards]\n    I --> J[Phase 4: Final Delivery]\n    J --> K[Best Solution]\n\nPhase 1: Parallel Implementation\n\nAgent Prompt Template:\n\nrole: \"Expert Software Engineer\"\nfocus: \"{{agent_focus}}\"  # Simplicity / Speed / Robustness\ntask: \"{{task_description}}\"\nconstraints:\n  - Complete runnable code in implementation/\n  - Checklist.md with ALL items checked\n  - SUMMARY.md with competitive advantages\n  - Must differ from other agents' approaches\n\nlinter_rules:\n  - code_compiles: true\n  - tests_pass: true\n  - no_todos: true\n  - documented: true\n\nassertions:\n  - implementation/main.* exists\n  - tests exist and pass\n  - Checklist.md is complete\n  - SUMMARY.md explains unique approach\n\nPhase 2: Cross-Evaluation\n\nEvaluation Prompt Template:\n\nevaluator: \"Agent {{from}}\"\ntarget: \"Agent {{to}}\"\ntask: \"Objectively prove your solution is superior\"\n\ndimensions:\n  simplicity:\n    weight: 20\n    metrics:\n      - lines_of_code: count\n      - cyclomatic_complexity: calculate\n      - readability_score: 1-10\n  \n  speed:\n    weight: 25\n    metrics:\n      - time_complexity: big_o\n      - space_complexity: big_o\n      - benchmark_results: run_if_possible\n  \n  stability:\n    weight: 25\n    metrics:\n      - error_handling_coverage: percentage\n      - resource_cleanup: check\n      - fault_tolerance: test\n  \n  corner_cases:\n    weight: 20\n    metrics:\n      - input_validation: comprehensive\n      - boundary_conditions: covered\n      - edge_cases: tested\n  \n  maintainability:\n    weight: 10\n    metrics:\n      - documentation_quality: 1-10\n      - code_structure: logical\n      - extensibility: easy/hard\n\nassertions:\n  - evaluation is objective with data\n  - specific code snippets cited\n  - numeric scores provided\n  - persuasion argument is data-driven\n\nPhase 3: Objective Scoring\n\nScoring Prompt Template:\n\nagent: \"Agent {{name}}\"\ntask: \"Fairly score yourself and competitors\"\n\nself_evaluation:\n  - dimension: simplicity\n    max: 20\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: speed\n    max: 25\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: stability\n 
   max: 25\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: corner_cases\n    max: 20\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n  \n  - dimension: maintainability\n    max: 10\n    score: \"{{self_score}}\"\n    justification: \"{{why}}\"\n\npeer_evaluation:\n  - target: \"Agent {{other}}\"\n    scores: \"{{numeric_scores}}\"\n    comparison: \"{{objective_comparison}}\"\n\nfinal_conclusion:\n  best_implementation: \"[A/B/C/Mixed]\"\n  reasoning: \"{{data_driven_justification}}\"\n  recommendation: \"{{delivery_strategy}}\"\n\nassertions:\n  - all scores are numeric\n  - justifications are specific\n  - no inflation or bias\n  - conclusion is evidence-based\n\nPhase 4: Final Delivery\n\nDecision Logic:\n\ndef select_winner(scores):\n    \"\"\"\n    Select final solution based on competitive scores\n    \"\"\"\n    margins = calculate_score_margins(scores)\n    \n    if margins.winner - margins.second > 15:\n        # Clear winner\n        return SingleWinner(scores.winner)\n    elif margins.winner - margins.second > 5:\n        # Close competition, consider hybrid\n        return HybridSolution(scores.top_two)\n    else:\n        # Very close, pick simplest\n        return SimplestImplementation(scores.all)\n\nassertions:\n  - final_solution is runnable\n  - comparison_report explains all approaches\n  - decision_rationale is transparent\n  - attribution is given to winning agent\n\n4. Format Specifications (PCTF: Format)\nDirectory Structure\nworkspace/\n├── run_a/\n│   ├── implementation/      # Agent A code\n│   ├── Checklist.md         # Completion checklist\n│   ├── SUMMARY.md           # Approach summary\n│   ├── evaluation/          # Evaluations of B, C\n│   └── SCORECARD.md         # Self-scoring\n├── run_b/                   # Same structure\n├── run_c/                   # Same structure\n├── final/                   # Winning solution\n├── COMPARISON_REPORT.md     # Full analysis\n└── DECISION_RATIONALE.md    # Why winner selected\n\nFile Formats\nChecklist.md: Markdown with - [x] checkboxes\nSUMMARY.md: Markdown with sections\nEVALUATION_*.md: Markdown with tables\nSCORECARD.md: Markdown with score tables\nImplementation: Runnable code files\n5. Linter & Validation\nPre-commit Checks\n#!/bin/bash\n# scripts/lint.sh\n\nlint_agent_output() {\n    local agent_dir=\"$1\"\n    local errors=0\n    \n    # Check required files exist\n    for file in Checklist.md SUMMARY.md implementation/main.*; do\n        if [[ ! -f \"${agent_dir}/${file}\" ]]; then\n            echo \"ERROR: Missing ${file}\"\n            ((errors++))\n        fi\n    done\n    \n    # Check Checklist is complete\n    if grep -q \"\\[ \\]\" \"${agent_dir}/Checklist.md\"; then\n        echo \"ERROR: Checklist has unchecked items\"\n        ((errors++))\n    fi\n    \n    # Check code compiles (language-specific)\n    # ... 
implementation-specific checks\n    \n    return $errors\n}\n\n# Run on all agents\nfor agent in a b c; do\n    lint_agent_output \"workspace/run_${agent}\" || exit 1\ndone\n\nRuntime Assertions\ndef assert_phase_complete(phase_name):\n    \"\"\"Assert that a phase has completed successfully\"\"\"\n    assertions = {\n        \"phase1\": [\n            \"workspace/run_a/implementation exists\",\n            \"workspace/run_b/implementation exists\", \n            \"workspace/run_c/implementation exists\",\n            \"All Checklist.md are complete\"\n        ],\n        \"phase2\": [\n            \"6 evaluation reports exist\",\n            \"All evaluations have numeric scores\"\n        ],\n        \"phase3\": [\n            \"3 scorecards exist\",\n            \"All scores are numeric\",\n            \"Conclusions are provided\"\n        ],\n        \"phase4\": [\n            \"final/solution exists\",\n            \"COMPARISON_REPORT.md exists\",\n            \"DECISION_RATIONALE.md exists\"\n        ]\n    }\n    \n    for assertion in assertions[phase_name]:\n        assert evaluate(assertion), f\"Assertion failed: {assertion}\"\n\n6. Configuration\nb3ehive:\n  # Agent configuration\n  agents:\n    count: 3\n    model: openai-proxy/gpt-5.3-codex\n    thinking: high\n    focuses:\n      - simplicity\n      - speed\n      - robustness\n  \n  # Evaluation weights (must sum to 100)\n  evaluation:\n    dimensions:\n      simplicity: 20\n      speed: 25\n      stability: 25\n      corner_cases: 20\n      maintainability: 10\n  \n  # Delivery strategy\n  delivery:\n    strategy: auto  # auto / best / hybrid\n    threshold: 15   # Point margin for clear winner\n  \n  # Quality gates\n  quality:\n    lint: true\n    test: true\n    coverage_threshold: 80\n\n7. Usage\n# Basic usage\nb3ehive \"Implement a thread-safe rate limiter\"\n\n# With constraints\nb3ehive \"Implement quicksort\" --lang python --max-lines 50\n\n# Using OpenClaw CLI\nopenclaw skills run b3ehive --task \"Your task\"\n\n8. License\n\nMIT © Weiyang (@weiyangzen)"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/weiyangzen/b3ehive",
    "publisherUrl": "https://clawhub.ai/weiyangzen/b3ehive",
    "owner": "weiyangzen",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/b3ehive",
    "downloadUrl": "https://openagent3.xyz/downloads/b3ehive",
    "agentUrl": "https://openagent3.xyz/skills/b3ehive/agent",
    "manifestUrl": "https://openagent3.xyz/skills/b3ehive/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/b3ehive/agent.md"
  }
}