{
  "schemaVersion": "1.0",
  "item": {
    "slug": "teamwork",
    "name": "TeamWork",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/ChenXinBest/teamwork",
    "canonicalUrl": "https://clawhub.ai/ChenXinBest/teamwork",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/teamwork",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=teamwork",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "package.json",
      "README.md",
      "SKILL.md",
      "utils/errors.js",
      "utils/helpers.js",
      "utils/index.js"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/teamwork"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/teamwork",
    "agentPageUrl": "https://openagent3.xyz/skills/teamwork/agent",
    "manifestUrl": "https://openagent3.xyz/skills/teamwork/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/teamwork/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Teamwork Skill",
        "body": "This skill enables dynamic team creation and management for executing complex engineering tasks through coordinated AI agents with intelligent model selection, cost optimization, and continuous performance evaluation."
      },
      {
        "title": "When to Invoke",
        "body": "Invoke this skill when:\n\nUser requests execution of complex projects requiring multiple specialized roles\nTasks need to be broken down into coordinated steps (analysis, design, implementation, testing, review)\nUser wants to leverage multiple AI models/providers for optimal cost-performance balance\nProjects require structured workflow with quality assurance and iteration"
      },
      {
        "title": "Automatic Initialization",
        "body": "IMPORTANT: This skill includes an autonomous initialization system. When invoked for the first time or when configuration is missing, it will automatically:\n\nCheck Configuration Status\n\nVerify if .trae/config/providers.json exists\nVerify if .trae/config/team-roles.json exists\nVerify if .trae/data/model_scores.json exists\n\n\n\nInteractive Setup Process\nIf configuration files are missing or incomplete, the skill will proactively ask the user:\nStep 1: Provider Setup\n\nAsk: \"Which AI providers would you like to configure? (e.g., OpenAI, Anthropic, Google, Azure, etc.)\"\nFor each provider, collect:\n\nProvider name\nAPI key (or environment variable name)\nBase URL (if custom endpoint)\n\n\n\nStep 2: Model Configuration\nFor each provider, ask:\n\n\"Which models from [provider] would you like to use?\"\nFor each model, collect:\n\nModel name/identifier\nPricing model type (subscription/tiered_usage/pay_per_use)\nPricing details based on type:\n\nSubscription: cost, start date, end date\nTiered Usage: daily quota, monthly quota, overage rate\nPay-Per-Use: input cost per 1k tokens, output cost per 1k tokens\n\n\nCapabilities (e.g., reasoning, coding, fast-response)\nMaximum concurrent tasks\n\n\n\nStep 3: Host Model Selection\n\nAsk: \"Which model should serve as the primary interface (host model)?\"\nPresent list of configured models\nUser selects one as the main interaction point\n\nStep 4: Budget Configuration\n\nAsk: \"What is your monthly budget limit? (optional)\"\nSet alert thresholds\n\n\n\nConfiguration Persistence\n\nSave all configurations to .trae/config/providers.json\nCreate default role definitions in .trae/config/team-roles.json\nInitialize empty scores database in .trae/data/model_scores.json\nConfirm successful setup with user"
      },
      {
        "title": "Configuration Management Commands",
        "body": "Users can manage their configuration at any time using these commands:\n\nView Configuration\n\nUser: \"Show me my current provider and model configuration\"\n\nResponse: Display complete configuration from .trae/config/providers.json\n\nAdd Provider\n\nUser: \"Add a new provider: [provider name]\"\n\nAction: Interactive prompts for provider details, then append to configuration\n\nAdd Model\n\nUser: \"Add model [model name] to provider [provider name]\"\n\nAction: Interactive prompts for model details, then add to provider's model list\n\nUpdate Model Pricing\n\nUser: \"Update pricing for [model name]\"\n\nAction: Ask for new pricing details and update configuration\n\nRemove Model\n\nUser: \"Remove model [model name] from provider [provider name]\"\n\nAction: Confirm and remove from configuration\n\nChange Host Model\n\nUser: \"Change the host model to [model name]\"\n\nAction: Update host_model configuration\n\nView Model Scores\n\nUser: \"Show me the performance scores for all models\"\n\nResponse: Display current model capability scores from .trae/data/model_scores.json\n\nReset Configuration\n\nUser: \"Reset all configurations to default\"\n\nAction: Confirm with user, then reinitialize"
      },
      {
        "title": "Configuration File Structure",
        "body": "Provider Configuration (.trae/config/providers.json)\n\n{\n  \"version\": \"1.0\",\n  \"last_updated\": \"2026-02-12T11:00:00Z\",\n  \"providers\": [\n    {\n      \"name\": \"openai\",\n      \"api_key\": \"${OPENAI_API_KEY}\",\n      \"base_url\": \"https://api.openai.com/v1\",\n      \"models\": [\n        {\n          \"name\": \"gpt-4\",\n          \"pricing_model\": \"pay_per_use\",\n          \"input_cost_per_1k\": 0.03,\n          \"output_cost_per_1k\": 0.06,\n          \"context_window\": 128000,\n          \"capabilities\": [\"reasoning\", \"coding\", \"analysis\"],\n          \"max_concurrent_tasks\": 3\n        }\n      ]\n    }\n  ],\n  \"host_model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4\"\n  },\n  \"budget\": {\n    \"max_monthly_cost\": 100.00,\n    \"currency\": \"USD\",\n    \"alert_threshold\": 0.8\n  }\n}\n\nTeam Roles Configuration (.trae/config/team-roles.json)\n\n{\n  \"version\": \"1.0\",\n  \"last_updated\": \"2026-02-12T11:00:00Z\",\n  \"roles\": {\n    \"project_manager\": {\n      \"description\": \"Coordinates team activities and manages timeline\",\n      \"required_capabilities\": [\"planning\", \"coordination\", \"communication\"],\n      \"preferred_model_traits\": {\n        \"reliability\": \"high\",\n        \"thinking_depth\": \"medium\",\n        \"response_speed\": \"medium\"\n      }\n    }\n  }\n}\n\nModel Scores Database (.trae/data/model_scores.json)\n\n{\n  \"version\": \"1.0\",\n  \"last_updated\": \"2026-02-12T11:00:00Z\",\n  \"evaluation_interval\": 3600,\n  \"scores\": {}\n}"
      },
      {
        "title": "Initialization Checklist",
        "body": "Before executing any team task, verify:\n\n.trae/config/providers.json exists and contains at least one provider\n At least one model is configured\n Host model is designated\n .trae/config/team-roles.json exists with role definitions\n .trae/data/model_scores.json exists (can be empty initially)\n\nIf any checklist item fails, trigger interactive initialization."
      },
      {
        "title": "1. Model Performance Evaluation System",
        "body": "Multi-Dimensional Scoring\n\nAll models are periodically evaluated by peer models across multiple dimensions:\n\nEvaluation Dimensions:\n\nResponse Speed: How quickly the model responds to requests\nResponse Frequency: Rate of successful responses within time windows\nThinking Depth: Quality of reasoning and problem-solving approach\nMulti-threading Capability: Ability to handle parallel tasks\nCode Quality: Quality of generated code (for coding tasks)\nCreativity: Novelty and innovation in solutions\nReliability: Consistency in performance across sessions\nContext Understanding: Ability to maintain context over long conversations\n\nScoring Mechanism:\n\nEach model scores other models on a scale (e.g., 1-10) for each dimension\nScores are aggregated using weighted average\nEvaluations occur after each task completion\nHistorical scores are maintained with decay factor for recent performance\nFinal capability score = weighted sum of all dimension scores\n\nScore Storage:\n\n{\n  \"model_scores\": {\n    \"gpt-4\": {\n      \"response_speed\": 8.5,\n      \"response_frequency\": 9.0,\n      \"thinking_depth\": 9.5,\n      \"multi_threading\": 7.0,\n      \"code_quality\": 9.0,\n      \"creativity\": 8.5,\n      \"reliability\": 9.5,\n      \"context_understanding\": 9.0,\n      \"overall_score\": 8.75,\n      \"evaluation_count\": 42,\n      \"last_updated\": \"2026-02-12T10:30:00Z\"\n    }\n  }\n}"
      },
      {
        "title": "2. Cost Calculation System",
        "body": "Pricing Models\n\nSubscription-Based (订阅制)\n\nFixed cost for unlimited usage during subscription period\nLowest effective cost per request when fully utilized\nMarked as expired after subscription ends → excluded from team\nConfiguration:\n\n{\n  \"pricing_model\": \"subscription\",\n  \"cost\": 20.00,\n  \"currency\": \"USD\",\n  \"valid_from\": \"2026-02-01\",\n  \"valid_until\": \"2026-03-01\",\n  \"status\": \"active\"\n}\n\nTiered Usage (阶段用量制)\n\nLower cost with daily/monthly quotas\nMedium cost effectiveness\nMust monitor quota usage daily\nConfiguration:\n\n{\n  \"pricing_model\": \"tiered_usage\",\n  \"daily_quota\": 1000,\n  \"daily_used\": 450,\n  \"monthly_quota\": 30000,\n  \"monthly_used\": 12500,\n  \"cost\": 15.00,\n  \"currency\": \"USD\",\n  \"overage_rate\": 0.02\n}\n\nPay-Per-Use (用量计费制)\n\nHighest cost per request\nNo quota limits\nBest for sporadic or overflow usage\nConfiguration:\n\n{\n  \"pricing_model\": \"pay_per_use\",\n  \"input_cost_per_1k\": 0.03,\n  \"output_cost_per_1k\": 0.06,\n  \"currency\": \"USD\",\n  \"total_spent\": 2.45\n}\n\nCost Score Calculation:\n\ncost_score = (normalized_cost) * (usage_efficiency) * (availability_factor)"
      },
      {
        "title": "3. Model Availability Management",
        "body": "Status Tracking:\n\navailable: Ready to accept tasks\nbusy: Currently processing tasks\nexpired: Subscription expired\nquota_exceeded: Daily/monthly quota reached\nrate_limited: Temporarily unavailable due to rate limits\noffline: Provider API unavailable\n\nBusy State Management:\n\n{\n  \"model_status\": {\n    \"gpt-4\": {\n      \"status\": \"busy\",\n      \"current_tasks\": [\"task-123\", \"task-456\"],\n      \"max_concurrent\": 3,\n      \"estimated_free_at\": \"2026-02-12T11:00:00Z\"\n    }\n  }\n}"
      },
      {
        "title": "Phase 1: User Request & Requirement Analysis",
        "body": "Step 1.1: User submits request to Host Model (主模型)\n\nUser interacts with designated host model (primary interface)\nHost model receives and acknowledges the request\n\nStep 1.2: Host model decomposes requirements\n\nBreak down request into phases and subtasks\nIdentify dependencies between tasks\nEstimate complexity and required capabilities\nCreate initial task tree\n\nStep 1.3: User confirmation\n\nPresent task breakdown to user\nClarify ambiguities and refine requirements\nGet explicit approval to proceed\n\nOutput:\n\n{\n  \"task_id\": \"task-789\",\n  \"phases\": [\n    {\n      \"phase_id\": \"phase-1\",\n      \"name\": \"Requirement Analysis\",\n      \"subtasks\": [\n        {\n          \"subtask_id\": \"st-1\",\n          \"description\": \"Analyze user requirements\",\n          \"required_capabilities\": [\"analysis\", \"communication\"],\n          \"estimated_complexity\": \"medium\"\n        }\n      ]\n    }\n  ]\n}"
      },
      {
        "title": "Phase 2: Team Assembly Meeting",
        "body": "Step 2.1: Host model convenes all available models\n\nFilter models by status (exclude expired, offline, quota_exceeded)\nCheck busy models for potential availability\nSend meeting invitation to all eligible models\n\nStep 2.2: Task briefing\n\nHost model presents all task content to all models\nShare task breakdown, requirements, and constraints\nDistribute context and background information\n\nStep 2.3: Collaborative role definition\n\nAll models discuss and agree on required roles\nDefine capability requirements for each role\nEstimate workload for each role\nIdentify potential bottlenecks\n\nMeeting Output:\n\n{\n  \"meeting_id\": \"meeting-456\",\n  \"required_roles\": [\n    {\n      \"role_name\": \"architect\",\n      \"required_capabilities\": [\"system-design\", \"architecture\"],\n      \"estimated_workload\": \"high\",\n      \"priority\": \"critical\"\n    },\n    {\n      \"role_name\": \"developer\",\n      \"required_capabilities\": [\"coding\", \"debugging\"],\n      \"estimated_workload\": \"high\",\n      \"priority\": \"high\"\n    }\n  ],\n  \"consensus_reached\": true\n}"
      },
      {
        "title": "Phase 3: Role Assignment",
        "body": "Step 3.1: Self-nomination\n\nEach model evaluates own suitability based on:\n\nCurrent cost score\nCurrent capability score\nCurrent workload (busy status)\nRole requirements match\n\n\nModels submit role preferences\n\nStep 3.2: Conflict resolution\n\nIf multiple models want same role: democratic voting\nIf role has no candidates: negotiate with best-fit models\nBalance workload distribution across models\nAvoid concentrating all tasks on single model\n\nStep 3.3: Final assignment\n\nConfirm role assignments with all models\nDocument assignment rationale\nUpdate model busy status\n\nAssignment Algorithm:\n\nfor each role:\n  candidates = models.filter(capable_and_available)\n  if len(candidates) == 1:\n    assign to candidates[0]\n  elif len(candidates) > 1:\n    scores = calculate_combined_score(candidates, role)\n    winner = vote_among_models(candidates, scores)\n    assign to winner\n  else:\n    negotiate_with_best_available_model()\n\nCombined Score Calculation:\n\ncombined_score = (capability_score * 0.4) + \n                 (cost_efficiency_score * 0.3) + \n                 (availability_score * 0.2) + \n                 (workload_balance_factor * 0.1)"
      },
      {
        "title": "Phase 4: Herald Selection & Communication Setup",
        "body": "Step 4.1: Select Herald (传令官)\n\nChoose fastest responding model (not necessarily most capable)\nHerald acts as central communication hub\nAll models communicate through herald\n\nHerald Responsibilities:\n\nRelay messages between all team members\nDistribute progress updates\nBroadcast requirements and instructions\nCollect and aggregate results\nMonitor task completion status\nReport status to host model\nHandle timeout and failure notifications\n\nStep 4.2: Communication channels\n\nModel A → Herald → Model B\nModel A → Herald → All Models\nHerald → Host Model (status reports)\n\nHerald Configuration:\n\n{\n  \"herald\": {\n    \"model\": \"gpt-3.5-turbo\",\n    \"selection_criteria\": \"fastest_response\",\n    \"polling_interval\": 30,\n    \"timeout_threshold\": 300,\n    \"responsibilities\": [\n      \"message_relay\",\n      \"status_monitoring\",\n      \"progress_tracking\",\n      \"failure_reporting\"\n    ]\n  }\n}"
      },
      {
        "title": "Phase 5: Task Execution",
        "body": "Step 5.1: Parallel execution\n\nAssigned models work on their respective tasks\nRegular progress updates to herald\nHerald broadcasts relevant updates to team\n\nStep 5.2: Coordination\n\nHerald checks task status periodically\nIdentifies blockers and delays\nFacilitates inter-model communication\nEscalates issues to host model\n\nStep 5.3: Progress tracking\n\n{\n  \"task_progress\": {\n    \"task_id\": \"task-789\",\n    \"overall_progress\": 65,\n    \"subtask_status\": {\n      \"st-1\": \"completed\",\n      \"st-2\": \"in_progress\",\n      \"st-3\": \"pending\"\n    },\n    \"blockers\": [],\n    \"estimated_completion\": \"2026-02-12T14:00:00Z\"\n  }\n}"
      },
      {
        "title": "Phase 6: Task Completion & Review Meeting",
        "body": "Step 6.1: Completion notification\n\nHerald confirms all tasks completed\nCollects final outputs from all models\nAggregates results\n\nStep 6.2: Summary meeting\n\nHost model convenes all participating models\nEach model presents their contribution\nDiscuss challenges and solutions\nEvaluate collaboration effectiveness\n\nStep 6.3: Performance re-evaluation\n\nModels rate each other's performance\nUpdate capability scores based on task execution\nRecord role-model fit assessments\nUpdate model scores database\n\nEvaluation Form:\n\n{\n  \"evaluation\": {\n    \"evaluator\": \"gpt-4\",\n    \"evaluatee\": \"claude-3\",\n    \"task_id\": \"task-789\",\n    \"role_played\": \"developer\",\n    \"scores\": {\n      \"response_speed\": 8,\n      \"thinking_depth\": 9,\n      \"code_quality\": 9,\n      \"collaboration\": 8\n    },\n    \"role_fit\": \"excellent\",\n    \"comments\": \"Strong problem-solving skills\"\n  }\n}"
      },
      {
        "title": "Phase 7: Failure Handling & Iteration",
        "body": "Step 7.1: Failure detection\n\nHerald detects task failure or timeout\nCollects failure information from relevant models\nReports to host model with detailed context\n\nStep 7.2: Failure analysis meeting\n\nConvene all participating models\nAnalyze root cause of failure\nIdentify contributing factors\nPropose solutions\n\nStep 7.3: User consultation\n\nHost model presents failure analysis to user\nDiscuss potential solutions:\n\nRequirement changes\nApproach modifications\nTeam reconfiguration\nAdditional resources\n\n\nGet user decision on next steps\n\nStep 7.4: Iteration or termination\n\nIf user approves changes: restart from appropriate phase\nIf user terminates: document lessons learned\nUpdate model scores based on partial performance"
      },
      {
        "title": "Provider Configuration (.trae/config/providers.json)",
        "body": "{\n  \"providers\": [\n    {\n      \"name\": \"openai\",\n      \"api_key\": \"${OPENAI_API_KEY}\",\n      \"base_url\": \"https://api.openai.com/v1\",\n      \"models\": [\n        {\n          \"name\": \"gpt-4\",\n          \"pricing_model\": \"pay_per_use\",\n          \"input_cost_per_1k\": 0.03,\n          \"output_cost_per_1k\": 0.06,\n          \"context_window\": 128000,\n          \"capabilities\": [\"reasoning\", \"coding\", \"analysis\"],\n          \"max_concurrent_tasks\": 3\n        },\n        {\n          \"name\": \"gpt-3.5-turbo\",\n          \"pricing_model\": \"subscription\",\n          \"subscription_cost\": 20.00,\n          \"valid_from\": \"2026-02-01\",\n          \"valid_until\": \"2026-03-01\",\n          \"context_window\": 16385,\n          \"capabilities\": [\"fast-response\", \"coding\"],\n          \"max_concurrent_tasks\": 5\n        }\n      ]\n    },\n    {\n      \"name\": \"anthropic\",\n      \"api_key\": \"${ANTHROPIC_API_KEY}\",\n      \"base_url\": \"https://api.anthropic.com\",\n      \"models\": [\n        {\n          \"name\": \"claude-3-opus\",\n          \"pricing_model\": \"tiered_usage\",\n          \"daily_quota\": 500,\n          \"monthly_quota\": 15000,\n          \"input_cost_per_1k\": 0.015,\n          \"output_cost_per_1k\": 0.075,\n          \"context_window\": 200000,\n          \"capabilities\": [\"reasoning\", \"analysis\", \"long-context\"],\n          \"max_concurrent_tasks\": 2\n        }\n      ]\n    }\n  ],\n  \"host_model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4\",\n    \"role\": \"primary_interface\"\n  },\n  \"budget\": {\n    \"max_monthly_cost\": 100.00,\n    \"currency\": \"USD\",\n    \"alert_threshold\": 0.8\n  }\n}"
      },
      {
        "title": "Team Roles Configuration (.trae/config/team-roles.json)",
        "body": "{\n  \"roles\": {\n    \"project_manager\": {\n      \"description\": \"Coordinates team activities and manages timeline\",\n      \"required_capabilities\": [\"planning\", \"coordination\", \"communication\"],\n      \"preferred_model_traits\": {\n        \"reliability\": \"high\",\n        \"thinking_depth\": \"medium\",\n        \"response_speed\": \"medium\"\n      },\n      \"typical_workload\": \"medium\"\n    },\n    \"architect\": {\n      \"description\": \"Designs system architecture and technical approach\",\n      \"required_capabilities\": [\"system-design\", \"architecture\", \"patterns\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"high\",\n        \"creativity\": \"high\",\n        \"context_understanding\": \"high\"\n      },\n      \"typical_workload\": \"high\"\n    },\n    \"developer\": {\n      \"description\": \"Implements code following specifications\",\n      \"required_capabilities\": [\"coding\", \"debugging\", \"refactoring\"],\n      \"preferred_model_traits\": {\n        \"code_quality\": \"high\",\n        \"response_speed\": \"medium\",\n        \"reliability\": \"high\"\n      },\n      \"typical_workload\": \"high\"\n    },\n    \"tester\": {\n      \"description\": \"Creates and executes test suites\",\n      \"required_capabilities\": [\"testing\", \"qa\", \"validation\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"medium\",\n        \"response_speed\": \"high\",\n        \"reliability\": \"high\"\n      },\n      \"typical_workload\": \"medium\"\n    },\n    \"reviewer\": {\n      \"description\": \"Performs code reviews and quality checks\",\n      \"required_capabilities\": [\"code-review\", \"best-practices\", \"security\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"high\",\n        \"code_quality\": \"high\",\n        \"reliability\": \"high\"\n      },\n      \"typical_workload\": \"medium\"\n    },\n    \"analyst\": {\n      
\"description\": \"Analyzes requirements and breaks down tasks\",\n      \"required_capabilities\": [\"analysis\", \"communication\", \"documentation\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"high\",\n        \"context_understanding\": \"high\",\n        \"creativity\": \"medium\"\n      },\n      \"typical_workload\": \"medium\"\n    }\n  }\n}"
      },
      {
        "title": "Model Scores Database (.trae/data/model_scores.json)",
        "body": "{\n  \"last_evaluation\": \"2026-02-12T10:30:00Z\",\n  \"evaluation_interval\": 3600,\n  \"scores\": {\n    \"gpt-4\": {\n      \"dimensions\": {\n        \"response_speed\": 8.5,\n        \"response_frequency\": 9.0,\n        \"thinking_depth\": 9.5,\n        \"multi_threading\": 7.0,\n        \"code_quality\": 9.0,\n        \"creativity\": 8.5,\n        \"reliability\": 9.5,\n        \"context_understanding\": 9.0\n      },\n      \"overall_score\": 8.75,\n      \"evaluation_count\": 42,\n      \"role_fit_history\": {\n        \"architect\": 9.2,\n        \"developer\": 8.8,\n        \"reviewer\": 9.0\n      }\n    }\n  }\n}"
      },
      {
        "title": "Model Selection",
        "body": "Match model capabilities to task requirements\nConsider cost-effectiveness for routine tasks\nReserve high-capability models for complex reasoning\nDistribute workload to prevent bottlenecks"
      },
      {
        "title": "Communication",
        "body": "Keep messages concise and clear\nUse structured formats for inter-model communication\nHerald should batch non-urgent updates\nEscalate critical issues immediately"
      },
      {
        "title": "Performance Optimization",
        "body": "Cache frequently used context\nBatch similar requests when possible\nMonitor quota usage proactively\nMaintain backup models for critical roles"
      },
      {
        "title": "Quality Assurance",
        "body": "Always conduct review meetings\nUpdate scores after each task\nLearn from failures systematically\nContinuously refine role definitions"
      },
      {
        "title": "Provider Failures",
        "body": "Retry with exponential backoff\nSwitch to backup provider\nNotify team of delays\nUpdate model availability status"
      },
      {
        "title": "Task Failures",
        "body": "Capture detailed error context\nAnalyze root cause with team\nPropose remediation strategies\nConsult user for major changes"
      },
      {
        "title": "Communication Failures",
        "body": "Herald implements heartbeat checks\nFallback to direct model-to-model communication\nReassign herald if unresponsive\nLog all communication issues"
      },
      {
        "title": "Output Format",
        "body": "Final deliverables include:\n\nComplete task execution report\nTeam composition and role assignments\nIndividual model performance metrics\nCost breakdown and usage statistics\nUpdated model capability scores\nLessons learned and recommendations"
      },
      {
        "title": "Directory Structure",
        "body": ".trae/skills/teamwork/\n├── SKILL.md                    # Main skill definition (this file)\n├── scripts/                    # Execution scripts\n│   ├── init.js                # Initialization and configuration loader\n│   ├── config-manager.js      # Provider and model configuration management\n│   ├── score-manager.js       # Model performance score management\n│   ├── team-coordinator.js    # Team assembly and task coordination\n│   └── herald.js              # Communication and message relay system\n├── templates/                  # Document templates\n│   ├── task-report.md         # Task execution report template\n│   ├── meeting-minutes.md     # Meeting minutes template\n│   ├── failure-report.md      # Failure analysis report template\n│   └── evaluation-form.md     # Model evaluation form template\n├── utils/                      # Utility functions\n│   ├── index.js               # Utility exports\n│   ├── helpers.js             # General helper functions\n│   ├── logger.js              # Logging system\n│   ├── template-renderer.js   # Template rendering engine\n│   └── errors.js              # Custom error classes\n└── data/                       # Skill runtime data\n    └── (generated at runtime)"
      },
      {
        "title": "Scripts Reference",
        "body": "init.js - Initialization Module\n\nPurpose: Handles skill initialization and configuration loading.\n\nKey Functions:\n\nensureDirectories() - Create required directories\ncheckConfiguration() - Verify configuration status\ninitializeDefaultRoles() - Create default role definitions\ninitializeEmptyScores() - Initialize empty scores database\ninitializeEmptyProviders() - Initialize empty providers config\nneedsInitialization() - Check if initialization is required\nreadJSON(filePath) - Read JSON configuration file\nwriteJSON(filePath, data) - Write JSON configuration file\n\nUsage:\n\nconst init = require('./scripts/init.js');\n\n// Check if initialization needed\nif (init.needsInitialization()) {\n  init.initializeDefaultRoles();\n  init.initializeEmptyScores();\n  init.initializeEmptyProviders();\n}\n\nconfig-manager.js - Configuration Management\n\nPurpose: Manage provider and model configurations.\n\nKey Functions:\n\naddProvider(config, providerInfo) - Add new provider\naddModel(config, providerName, modelInfo) - Add model to provider\nremoveModel(config, providerName, modelName) - Remove model\nremoveProvider(config, providerName) - Remove provider\nupdateModelPricing(config, providerName, modelName, pricingInfo) - Update pricing\nsetHostModel(config, providerName, modelName) - Set host model\nsetBudget(config, budgetInfo) - Set budget limits\ngetAvailableModels(config) - Get list of available models\ngetModelStatus(model) - Get model availability status\ndisplayConfiguration(config) - Display current configuration\n\nUsage:\n\nconst configManager = require('./scripts/config-manager.js');\nconst config = init.readJSON(init.PROVIDERS_FILE);\n\n// Add new provider\nconfigManager.addProvider(config, {\n  name: 'openai',\n  api_key: '${OPENAI_API_KEY}',\n  base_url: 'https://api.openai.com/v1'\n});\n\n// Add model with subscription pricing\nconfigManager.addModel(config, 'openai', {\n  name: 'gpt-4',\n  pricing_model: 'subscription',\n  
subscription_cost: 20.00,\n  valid_from: '2026-02-01',\n  valid_until: '2026-03-01',\n  capabilities: ['reasoning', 'coding']\n});\n\nscore-manager.js - Performance Score Management\n\nPurpose: Manage model performance evaluation scores.\n\nKey Functions:\n\ninitializeModelScore(scores, modelName, provider) - Initialize model scores\nupdateModelScore(scores, modelName, dimension, newScore, evaluator) - Update dimension score\ncalculateOverallScore(dimensions, weights) - Calculate weighted overall score\nupdateRoleFit(scores, modelName, roleName, fitScore) - Update role fit score\ngetTopModelsForRole(scores, roleName, topN) - Get top models for a role\ngetModelsByCapability(scores, capability, minScore) - Get models by capability\nrecordEvaluation(scores, evaluation) - Record complete evaluation\ndisplayScores(scores) - Display all model scores\n\nUsage:\n\nconst scoreManager = require('./scripts/score-manager.js');\n\n// Record evaluation\nscoreManager.recordEvaluation(scores, {\n  evaluator: 'gpt-4',\n  evaluatee: 'claude-3',\n  task_id: 'task-123',\n  role_played: 'developer',\n  scores: {\n    response_speed: 8,\n    thinking_depth: 9,\n    code_quality: 9\n  },\n  role_fit: 'excellent'\n});\n\n// Get top models for architect role\nconst topArchitects = scoreManager.getTopModelsForRole(scores, 'architect', 3);\n\nteam-coordinator.js - Team Coordination\n\nPurpose: Coordinate team assembly and task execution.\n\nKey Class: TeamCoordinator\n\nMethods:\n\nload() - Load configurations\ngetAvailableModels() - Get available models list\nselectHerald() - Select fastest model as herald\nassignRoles(requiredRoles) - Assign roles to models\ncalculateCombinedScore(model, roleFit) - Calculate selection score\ncreateTaskPlan(userRequest) - Create task execution plan\ngenerateMeetingAgenda(meetingType) - Generate meeting agenda\ngenerateEvaluationForms() - Generate peer evaluation forms\ngenerateReport() - Generate task report\n\nUsage:\n\nconst TeamCoordinator = 
require('./scripts/team-coordinator.js');\n\nconst coordinator = new TeamCoordinator();\ncoordinator.load();\n\n// Select herald\nconst herald = coordinator.selectHerald();\n\n// Assign roles\nconst assignments = coordinator.assignRoles(['architect', 'developer', 'tester']);\n\n// Create task plan\nconst plan = coordinator.createTaskPlan('Build a REST API');\n\nherald.js - Communication System\n\nPurpose: Manage inter-model communication and coordination.\n\nKey Class: Herald\n\nMethods:\n\ninitializeTeam(team) - Initialize team status tracking\nbroadcast(message, excludeSender) - Broadcast message to all\nsendDirectMessage(to, message, from) - Send direct message\nupdateProgress(model, subtaskId, progress, status) - Update task progress\ngetTeamStatus() - Get current team status\ncheckTimeouts() - Check for timeout conditions\npollTeam() - Request status from all members\nreportToHost(status) - Send status report to host\nnotifyFailure(model, error, context) - Notify failure\nnotifyCompletion(model, result) - Notify completion\ngetOverallProgress() - Get overall task progress\n\nUsage:\n\nconst Herald = require('./scripts/herald.js');\n\nconst herald = new Herald('gpt-3.5-turbo', 'openai');\nherald.initializeTeam(team);\n\n// Broadcast update\nherald.broadcast({ type: 'task_update', content: 'Phase 1 complete' });\n\n// Check progress\nconst progress = herald.getOverallProgress();"
      },
      {
        "title": "Templates Reference",
        "body": "task-report.md - Task Execution Report\n\nPurpose: Document complete task execution details.\n\nVariables:\n\ntask_id - Unique task identifier\ntimestamp - Report generation time\nstatus - Task status (completed/failed/in_progress)\nsummary - Executive summary\nteam_members - Array of team member details\nphases - Array of execution phases\nmodel_metrics - Performance metrics per model\ntotal_cost - Total execution cost\ndeliverables - Array of deliverables\nlessons - Lessons learned\nrecommendations - Recommendations\nscore_updates - Model score updates\n\nUsage:\n\nconst { renderTemplateFromFile } = require('./utils/template-renderer.js');\n\nconst report = renderTemplateFromFile('task-report.md', {\n  task_id: 'task-123',\n  timestamp: new Date().toISOString(),\n  status: 'completed',\n  summary: 'Successfully implemented REST API',\n  team_members: [...],\n  phases: [...]\n});\n\nmeeting-minutes.md - Meeting Documentation\n\nPurpose: Document team meeting discussions and decisions.\n\nVariables:\n\nmeeting_id - Unique meeting identifier\nmeeting_type - Type of meeting\ndate - Meeting date\nduration - Meeting duration\nparticipants - Array of participants\nagenda_items - Meeting agenda\nvoting_results - Voting results (if applicable)\naction_items - Action items from meeting\nnext_steps - Next steps to take\n\nfailure-report.md - Failure Analysis\n\nPurpose: Document and analyze task failures.\n\nVariables:\n\ntask_id - Failed task identifier\nfailure_time - Time of failure\nfailure_type - Type of failure\nseverity - Failure severity\ntimeline - Timeline of events\nprimary_cause - Root cause\ncontributing_factors - Contributing factors\nrecovery_actions - Actions taken for recovery\nrecommendations - Recommendations to prevent recurrence\n\nevaluation-form.md - Model Evaluation\n\nPurpose: Document peer model evaluations.\n\nVariables:\n\nevaluator_model - Evaluating model\nevaluatee_model - Model being evaluated\ntask_id - Related 
task\nrole_played - Role in task\nresponse_speed through context_understanding - Dimension scores\nrole_fit - Overall role fit assessment\nstrengths - Model strengths\nimprovements - Areas for improvement"
      },
      {
        "title": "Utilities Reference",
        "body": "helpers.js - General Utilities\n\nFunctions:\n\ngenerateId(prefix) - Generate unique identifier\nformatDate(date) - Format date to ISO string\nformatDuration(ms) - Format milliseconds to readable duration\ncalculateCost(model, inputTokens, outputTokens) - Calculate API cost\ndeepClone(obj) - Deep clone object\nmergeObjects(target, source) - Deep merge objects\nretryWithBackoff(fn, maxRetries, delay) - Retry with exponential backoff\nchunkArray(array, size) - Split array into chunks\ngroupBy(array, key) - Group array by key\nsortBy(array, key, order) - Sort array by key\nuniqueBy(array, key) - Remove duplicates by key\n\nlogger.js - Logging System\n\nClass: Logger\n\nLog Levels: DEBUG, INFO, WARN, ERROR\n\nMethods:\n\ndebug(message, data) - Log debug message\ninfo(message, data) - Log info message\nwarn(message, data) - Log warning message\nerror(message, data) - Log error message\nsetLevel(level) - Set log level\nsetLogFile(filePath) - Set log file path\n\nUsage:\n\nconst { createLogger, LOG_LEVELS } = require('./utils/logger.js');\n\nconst logger = createLogger('teamwork', { \n  level: LOG_LEVELS.DEBUG,\n  console: true \n});\n\nlogger.info('Task started', { task_id: 'task-123' });\n\nerrors.js - Custom Errors\n\nError Classes:\n\nValidationError - Input validation errors\nConfigurationError - Configuration errors\nModelNotFoundError - Model not found errors\nProviderNotFoundError - Provider not found errors\nTaskExecutionError - Task execution errors\nTimeoutError - Timeout errors\nBudgetExceededError - Budget exceeded errors\nQuotaExceededError - Quota exceeded errors\nHeraldError - Herald communication errors\n\nFunctions:\n\nhandleError(error, logger) - Standardized error handling\nisRecoverable(error) - Check if error is recoverable"
      },
      {
        "title": "Quick Start",
        "body": "// 1. Initialize skill\nconst init = require('./scripts/init.js');\nif (init.needsInitialization()) {\n  // Run interactive setup\n  init.initializeDefaultRoles();\n  init.initializeEmptyScores();\n  init.initializeEmptyProviders();\n}\n\n// 2. Configure providers\nconst configManager = require('./scripts/config-manager.js');\nconst config = init.readJSON(init.PROVIDERS_FILE);\n\nconfigManager.addProvider(config, { name: 'openai' });\nconfigManager.addModel(config, 'openai', {\n  name: 'gpt-4',\n  pricing_model: 'pay_per_use',\n  input_cost_per_1k: 0.03,\n  output_cost_per_1k: 0.06,\n  capabilities: ['reasoning', 'coding']\n});\nconfigManager.setHostModel(config, 'openai', 'gpt-4');\ninit.writeJSON(init.PROVIDERS_FILE, config);\n\n// 3. Create team and execute task\nconst TeamCoordinator = require('./scripts/team-coordinator.js');\nconst coordinator = new TeamCoordinator();\ncoordinator.load();\n\nconst herald = coordinator.selectHerald();\nconst team = coordinator.assignRoles(['architect', 'developer', 'tester']);\nconst plan = coordinator.createTaskPlan('Build REST API');\n\n// 4. Execute with herald coordination\nconst Herald = require('./scripts/herald.js');\nconst heraldInstance = new Herald(herald.model, herald.provider);\nheraldInstance.initializeTeam(team);\n\n// 5. Record evaluations and update scores\nconst scoreManager = require('./scripts/score-manager.js');\nconst scores = init.readJSON(init.SCORES_FILE);\n\nscoreManager.recordEvaluation(scores, {\n  evaluator: 'gpt-4',\n  evaluatee: 'claude-3',\n  task_id: plan.task_id,\n  role_played: 'developer',\n  scores: { response_speed: 8, thinking_depth: 9, code_quality: 9 },\n  role_fit: 'excellent'\n});\n\ninit.writeJSON(init.SCORES_FILE, scores);"
      },
      {
        "title": "Version History",
        "body": "v1.0.0 (2026-02-12): Initial release with full feature set\n\nMulti-provider support\nThree pricing models\n8-dimension performance evaluation\nHerald communication system\nComplete workflow management\nTemplate-based reporting"
      }
    ],
    "body": "Teamwork Skill\n\nThis skill enables dynamic team creation and management for executing complex engineering tasks through coordinated AI agents with intelligent model selection, cost optimization, and continuous performance evaluation.\n\nWhen to Invoke\n\nInvoke this skill when:\n\nUser requests execution of complex projects requiring multiple specialized roles\nTasks need to be broken down into coordinated steps (analysis, design, implementation, testing, review)\nUser wants to leverage multiple AI models/providers for optimal cost-performance balance\nProjects require structured workflow with quality assurance and iteration\nInitialization & Configuration Management\nAutomatic Initialization\n\nIMPORTANT: This skill includes an autonomous initialization system. When invoked for the first time or when configuration is missing, it will automatically:\n\nCheck Configuration Status\n\nVerify if .trae/config/providers.json exists\nVerify if .trae/config/team-roles.json exists\nVerify if .trae/data/model_scores.json exists\n\nInteractive Setup Process If configuration files are missing or incomplete, the skill will proactively ask the user:\n\nStep 1: Provider Setup\n\nAsk: \"Which AI providers would you like to configure? 
(e.g., OpenAI, Anthropic, Google, Azure, etc.)\"\nFor each provider, collect:\nProvider name\nAPI key (or environment variable name)\nBase URL (if custom endpoint)\n\nStep 2: Model Configuration For each provider, ask:\n\n\"Which models from [provider] would you like to use?\"\nFor each model, collect:\nModel name/identifier\nPricing model type (subscription/tiered_usage/pay_per_use)\nPricing details based on type:\nSubscription: cost, start date, end date\nTiered Usage: daily quota, monthly quota, overage rate\nPay-Per-Use: input cost per 1k tokens, output cost per 1k tokens\nCapabilities (e.g., reasoning, coding, fast-response)\nMaximum concurrent tasks\n\nStep 3: Host Model Selection\n\nAsk: \"Which model should serve as the primary interface (host model)?\"\nPresent list of configured models\nUser selects one as the main interaction point\n\nStep 4: Budget Configuration\n\nAsk: \"What is your monthly budget limit? (optional)\"\nSet alert thresholds\n\nConfiguration Persistence\n\nSave all configurations to .trae/config/providers.json\nCreate default role definitions in .trae/config/team-roles.json\nInitialize empty scores database in .trae/data/model_scores.json\nConfirm successful setup with user\nConfiguration Management Commands\n\nUsers can manage their configuration at any time using these commands:\n\nView Configuration\n\nUser: \"Show me my current provider and model configuration\"\n\n\nResponse: Display complete configuration from .trae/config/providers.json\n\nAdd Provider\n\nUser: \"Add a new provider: [provider name]\"\n\n\nAction: Interactive prompts for provider details, then append to configuration\n\nAdd Model\n\nUser: \"Add model [model name] to provider [provider name]\"\n\n\nAction: Interactive prompts for model details, then add to provider's model list\n\nUpdate Model Pricing\n\nUser: \"Update pricing for [model name]\"\n\n\nAction: Ask for new pricing details and update configuration\n\nRemove Model\n\nUser: \"Remove model [model name] 
from provider [provider name]\"\n\n\nAction: Confirm and remove from configuration\n\nChange Host Model\n\nUser: \"Change the host model to [model name]\"\n\n\nAction: Update host_model configuration\n\nView Model Scores\n\nUser: \"Show me the performance scores for all models\"\n\n\nResponse: Display current model capability scores from .trae/data/model_scores.json\n\nReset Configuration\n\nUser: \"Reset all configurations to default\"\n\n\nAction: Confirm with user, then reinitialize\n\nConfiguration File Structure\n\nProvider Configuration (.trae/config/providers.json)\n\n{\n  \"version\": \"1.0\",\n  \"last_updated\": \"2026-02-12T11:00:00Z\",\n  \"providers\": [\n    {\n      \"name\": \"openai\",\n      \"api_key\": \"${OPENAI_API_KEY}\",\n      \"base_url\": \"https://api.openai.com/v1\",\n      \"models\": [\n        {\n          \"name\": \"gpt-4\",\n          \"pricing_model\": \"pay_per_use\",\n          \"input_cost_per_1k\": 0.03,\n          \"output_cost_per_1k\": 0.06,\n          \"context_window\": 128000,\n          \"capabilities\": [\"reasoning\", \"coding\", \"analysis\"],\n          \"max_concurrent_tasks\": 3\n        }\n      ]\n    }\n  ],\n  \"host_model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4\"\n  },\n  \"budget\": {\n    \"max_monthly_cost\": 100.00,\n    \"currency\": \"USD\",\n    \"alert_threshold\": 0.8\n  }\n}\n\n\nTeam Roles Configuration (.trae/config/team-roles.json)\n\n{\n  \"version\": \"1.0\",\n  \"last_updated\": \"2026-02-12T11:00:00Z\",\n  \"roles\": {\n    \"project_manager\": {\n      \"description\": \"Coordinates team activities and manages timeline\",\n      \"required_capabilities\": [\"planning\", \"coordination\", \"communication\"],\n      \"preferred_model_traits\": {\n        \"reliability\": \"high\",\n        \"thinking_depth\": \"medium\",\n        \"response_speed\": \"medium\"\n      }\n    }\n  }\n}\n\n\nModel Scores Database (.trae/data/model_scores.json)\n\n{\n  \"version\": \"1.0\",\n  
\"last_updated\": \"2026-02-12T11:00:00Z\",\n  \"evaluation_interval\": 3600,\n  \"scores\": {}\n}\n\nInitialization Checklist\n\nBefore executing any team task, verify:\n\n .trae/config/providers.json exists and contains at least one provider\n At least one model is configured\n Host model is designated\n .trae/config/team-roles.json exists with role definitions\n .trae/data/model_scores.json exists (can be empty initially)\n\nIf any checklist item fails, trigger interactive initialization.\n\nCore System Components\n1. Model Performance Evaluation System\nMulti-Dimensional Scoring\n\nAll models are periodically evaluated by peer models across multiple dimensions:\n\nEvaluation Dimensions:\n\nResponse Speed: How quickly the model responds to requests\nResponse Frequency: Rate of successful responses within time windows\nThinking Depth: Quality of reasoning and problem-solving approach\nMulti-threading Capability: Ability to handle parallel tasks\nCode Quality: Quality of generated code (for coding tasks)\nCreativity: Novelty and innovation in solutions\nReliability: Consistency in performance across sessions\nContext Understanding: Ability to maintain context over long conversations\n\nScoring Mechanism:\n\nEach model scores other models on a scale (e.g., 1-10) for each dimension\nScores are aggregated using weighted average\nEvaluations occur after each task completion\nHistorical scores are maintained with decay factor for recent performance\nFinal capability score = weighted sum of all dimension scores\n\nScore Storage:\n\n{\n  \"model_scores\": {\n    \"gpt-4\": {\n      \"response_speed\": 8.5,\n      \"response_frequency\": 9.0,\n      \"thinking_depth\": 9.5,\n      \"multi_threading\": 7.0,\n      \"code_quality\": 9.0,\n      \"creativity\": 8.5,\n      \"reliability\": 9.5,\n      \"context_understanding\": 9.0,\n      \"overall_score\": 8.75,\n      \"evaluation_count\": 42,\n      \"last_updated\": \"2026-02-12T10:30:00Z\"\n    }\n  }\n}\n\n2. 
Cost Calculation System\nPricing Models\n\nSubscription-Based (订阅制)\n\nFixed cost for unlimited usage during subscription period\nLowest effective cost per request when fully utilized\nMarked as expired after subscription ends → excluded from team\nConfiguration:\n{\n  \"pricing_model\": \"subscription\",\n  \"cost\": 20.00,\n  \"currency\": \"USD\",\n  \"valid_from\": \"2026-02-01\",\n  \"valid_until\": \"2026-03-01\",\n  \"status\": \"active\"\n}\n\n\nTiered Usage (阶段用量制)\n\nLower cost with daily/monthly quotas\nMedium cost effectiveness\nMust monitor quota usage daily\nConfiguration:\n{\n  \"pricing_model\": \"tiered_usage\",\n  \"daily_quota\": 1000,\n  \"daily_used\": 450,\n  \"monthly_quota\": 30000,\n  \"monthly_used\": 12500,\n  \"cost\": 15.00,\n  \"currency\": \"USD\",\n  \"overage_rate\": 0.02\n}\n\n\nPay-Per-Use (用量计费制)\n\nHighest cost per request\nNo quota limits\nBest for sporadic or overflow usage\nConfiguration:\n{\n  \"pricing_model\": \"pay_per_use\",\n  \"input_cost_per_1k\": 0.03,\n  \"output_cost_per_1k\": 0.06,\n  \"currency\": \"USD\",\n  \"total_spent\": 2.45\n}\n\n\nCost Score Calculation:\n\ncost_score = (normalized_cost) * (usage_efficiency) * (availability_factor)\n\n3. 
Model Availability Management\n\nStatus Tracking:\n\navailable: Ready to accept tasks\nbusy: Currently processing tasks\nexpired: Subscription expired\nquota_exceeded: Daily/monthly quota reached\nrate_limited: Temporarily unavailable due to rate limits\noffline: Provider API unavailable\n\nBusy State Management:\n\n{\n  \"model_status\": {\n    \"gpt-4\": {\n      \"status\": \"busy\",\n      \"current_tasks\": [\"task-123\", \"task-456\"],\n      \"max_concurrent\": 3,\n      \"estimated_free_at\": \"2026-02-12T11:00:00Z\"\n    }\n  }\n}\n\nTask Execution Workflow\nPhase 1: User Request & Requirement Analysis\n\nStep 1.1: User submits request to Host Model (主模型)\n\nUser interacts with designated host model (primary interface)\nHost model receives and acknowledges the request\n\nStep 1.2: Host model decomposes requirements\n\nBreak down request into phases and subtasks\nIdentify dependencies between tasks\nEstimate complexity and required capabilities\nCreate initial task tree\n\nStep 1.3: User confirmation\n\nPresent task breakdown to user\nClarify ambiguities and refine requirements\nGet explicit approval to proceed\n\nOutput:\n\n{\n  \"task_id\": \"task-789\",\n  \"phases\": [\n    {\n      \"phase_id\": \"phase-1\",\n      \"name\": \"Requirement Analysis\",\n      \"subtasks\": [\n        {\n          \"subtask_id\": \"st-1\",\n          \"description\": \"Analyze user requirements\",\n          \"required_capabilities\": [\"analysis\", \"communication\"],\n          \"estimated_complexity\": \"medium\"\n        }\n      ]\n    }\n  ]\n}\n\nPhase 2: Team Assembly Meeting\n\nStep 2.1: Host model convenes all available models\n\nFilter models by status (exclude expired, offline, quota_exceeded)\nCheck busy models for potential availability\nSend meeting invitation to all eligible models\n\nStep 2.2: Task briefing\n\nHost model presents all task content to all models\nShare task breakdown, requirements, and constraints\nDistribute context and background 
information\n\nStep 2.3: Collaborative role definition\n\nAll models discuss and agree on required roles\nDefine capability requirements for each role\nEstimate workload for each role\nIdentify potential bottlenecks\n\nMeeting Output:\n\n{\n  \"meeting_id\": \"meeting-456\",\n  \"required_roles\": [\n    {\n      \"role_name\": \"architect\",\n      \"required_capabilities\": [\"system-design\", \"architecture\"],\n      \"estimated_workload\": \"high\",\n      \"priority\": \"critical\"\n    },\n    {\n      \"role_name\": \"developer\",\n      \"required_capabilities\": [\"coding\", \"debugging\"],\n      \"estimated_workload\": \"high\",\n      \"priority\": \"high\"\n    }\n  ],\n  \"consensus_reached\": true\n}\n\nPhase 3: Role Assignment\n\nStep 3.1: Self-nomination\n\nEach model evaluates own suitability based on:\nCurrent cost score\nCurrent capability score\nCurrent workload (busy status)\nRole requirements match\nModels submit role preferences\n\nStep 3.2: Conflict resolution\n\nIf multiple models want same role: democratic voting\nIf role has no candidates: negotiate with best-fit models\nBalance workload distribution across models\nAvoid concentrating all tasks on single model\n\nStep 3.3: Final assignment\n\nConfirm role assignments with all models\nDocument assignment rationale\nUpdate model busy status\n\nAssignment Algorithm:\n\nfor each role:\n  candidates = models.filter(capable_and_available)\n  if len(candidates) == 1:\n    assign to candidates[0]\n  elif len(candidates) > 1:\n    scores = calculate_combined_score(candidates, role)\n    winner = vote_among_models(candidates, scores)\n    assign to winner\n  else:\n    negotiate_with_best_available_model()\n\n\nCombined Score Calculation:\n\ncombined_score = (capability_score * 0.4) + \n                 (cost_efficiency_score * 0.3) + \n                 (availability_score * 0.2) + \n                 (workload_balance_factor * 0.1)\n\nPhase 4: Herald Selection & Communication Setup\n\nStep 4.1: 
Select Herald (传令官)\n\nChoose fastest responding model (not necessarily most capable)\nHerald acts as central communication hub\nAll models communicate through herald\n\nHerald Responsibilities:\n\nRelay messages between all team members\nDistribute progress updates\nBroadcast requirements and instructions\nCollect and aggregate results\nMonitor task completion status\nReport status to host model\nHandle timeout and failure notifications\n\nStep 4.2: Communication channels\n\nModel A → Herald → Model B\nModel A → Herald → All Models\nHerald → Host Model (status reports)\n\n\nHerald Configuration:\n\n{\n  \"herald\": {\n    \"model\": \"gpt-3.5-turbo\",\n    \"selection_criteria\": \"fastest_response\",\n    \"polling_interval\": 30,\n    \"timeout_threshold\": 300,\n    \"responsibilities\": [\n      \"message_relay\",\n      \"status_monitoring\",\n      \"progress_tracking\",\n      \"failure_reporting\"\n    ]\n  }\n}\n\nPhase 5: Task Execution\n\nStep 5.1: Parallel execution\n\nAssigned models work on their respective tasks\nRegular progress updates to herald\nHerald broadcasts relevant updates to team\n\nStep 5.2: Coordination\n\nHerald checks task status periodically\nIdentifies blockers and delays\nFacilitates inter-model communication\nEscalates issues to host model\n\nStep 5.3: Progress tracking\n\n{\n  \"task_progress\": {\n    \"task_id\": \"task-789\",\n    \"overall_progress\": 65,\n    \"subtask_status\": {\n      \"st-1\": \"completed\",\n      \"st-2\": \"in_progress\",\n      \"st-3\": \"pending\"\n    },\n    \"blockers\": [],\n    \"estimated_completion\": \"2026-02-12T14:00:00Z\"\n  }\n}\n\nPhase 6: Task Completion & Review Meeting\n\nStep 6.1: Completion notification\n\nHerald confirms all tasks completed\nCollects final outputs from all models\nAggregates results\n\nStep 6.2: Summary meeting\n\nHost model convenes all participating models\nEach model presents their contribution\nDiscuss challenges and solutions\nEvaluate collaboration 
effectiveness\n\nStep 6.3: Performance re-evaluation\n\nModels rate each other's performance\nUpdate capability scores based on task execution\nRecord role-model fit assessments\nUpdate model scores database\n\nEvaluation Form:\n\n{\n  \"evaluation\": {\n    \"evaluator\": \"gpt-4\",\n    \"evaluatee\": \"claude-3\",\n    \"task_id\": \"task-789\",\n    \"role_played\": \"developer\",\n    \"scores\": {\n      \"response_speed\": 8,\n      \"thinking_depth\": 9,\n      \"code_quality\": 9,\n      \"collaboration\": 8\n    },\n    \"role_fit\": \"excellent\",\n    \"comments\": \"Strong problem-solving skills\"\n  }\n}\n\nPhase 7: Failure Handling & Iteration\n\nStep 7.1: Failure detection\n\nHerald detects task failure or timeout\nCollects failure information from relevant models\nReports to host model with detailed context\n\nStep 7.2: Failure analysis meeting\n\nConvene all participating models\nAnalyze root cause of failure\nIdentify contributing factors\nPropose solutions\n\nStep 7.3: User consultation\n\nHost model presents failure analysis to user\nDiscuss potential solutions:\nRequirement changes\nApproach modifications\nTeam reconfiguration\nAdditional resources\nGet user decision on next steps\n\nStep 7.4: Iteration or termination\n\nIf user approves changes: restart from appropriate phase\nIf user terminates: document lessons learned\nUpdate model scores based on partial performance\nConfiguration Files\nProvider Configuration (.trae/config/providers.json)\n{\n  \"providers\": [\n    {\n      \"name\": \"openai\",\n      \"api_key\": \"${OPENAI_API_KEY}\",\n      \"base_url\": \"https://api.openai.com/v1\",\n      \"models\": [\n        {\n          \"name\": \"gpt-4\",\n          \"pricing_model\": \"pay_per_use\",\n          \"input_cost_per_1k\": 0.03,\n          \"output_cost_per_1k\": 0.06,\n          \"context_window\": 128000,\n          \"capabilities\": [\"reasoning\", \"coding\", \"analysis\"],\n          \"max_concurrent_tasks\": 3\n        
},\n        {\n          \"name\": \"gpt-3.5-turbo\",\n          \"pricing_model\": \"subscription\",\n          \"subscription_cost\": 20.00,\n          \"valid_from\": \"2026-02-01\",\n          \"valid_until\": \"2026-03-01\",\n          \"context_window\": 16385,\n          \"capabilities\": [\"fast-response\", \"coding\"],\n          \"max_concurrent_tasks\": 5\n        }\n      ]\n    },\n    {\n      \"name\": \"anthropic\",\n      \"api_key\": \"${ANTHROPIC_API_KEY}\",\n      \"base_url\": \"https://api.anthropic.com\",\n      \"models\": [\n        {\n          \"name\": \"claude-3-opus\",\n          \"pricing_model\": \"tiered_usage\",\n          \"daily_quota\": 500,\n          \"monthly_quota\": 15000,\n          \"input_cost_per_1k\": 0.015,\n          \"output_cost_per_1k\": 0.075,\n          \"context_window\": 200000,\n          \"capabilities\": [\"reasoning\", \"analysis\", \"long-context\"],\n          \"max_concurrent_tasks\": 2\n        }\n      ]\n    }\n  ],\n  \"host_model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4\",\n    \"role\": \"primary_interface\"\n  },\n  \"budget\": {\n    \"max_monthly_cost\": 100.00,\n    \"currency\": \"USD\",\n    \"alert_threshold\": 0.8\n  }\n}\n\nTeam Roles Configuration (.trae/config/team-roles.json)\n{\n  \"roles\": {\n    \"project_manager\": {\n      \"description\": \"Coordinates team activities and manages timeline\",\n      \"required_capabilities\": [\"planning\", \"coordination\", \"communication\"],\n      \"preferred_model_traits\": {\n        \"reliability\": \"high\",\n        \"thinking_depth\": \"medium\",\n        \"response_speed\": \"medium\"\n      },\n      \"typical_workload\": \"medium\"\n    },\n    \"architect\": {\n      \"description\": \"Designs system architecture and technical approach\",\n      \"required_capabilities\": [\"system-design\", \"architecture\", \"patterns\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"high\",\n        
\"creativity\": \"high\",\n        \"context_understanding\": \"high\"\n      },\n      \"typical_workload\": \"high\"\n    },\n    \"developer\": {\n      \"description\": \"Implements code following specifications\",\n      \"required_capabilities\": [\"coding\", \"debugging\", \"refactoring\"],\n      \"preferred_model_traits\": {\n        \"code_quality\": \"high\",\n        \"response_speed\": \"medium\",\n        \"reliability\": \"high\"\n      },\n      \"typical_workload\": \"high\"\n    },\n    \"tester\": {\n      \"description\": \"Creates and executes test suites\",\n      \"required_capabilities\": [\"testing\", \"qa\", \"validation\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"medium\",\n        \"response_speed\": \"high\",\n        \"reliability\": \"high\"\n      },\n      \"typical_workload\": \"medium\"\n    },\n    \"reviewer\": {\n      \"description\": \"Performs code reviews and quality checks\",\n      \"required_capabilities\": [\"code-review\", \"best-practices\", \"security\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"high\",\n        \"code_quality\": \"high\",\n        \"reliability\": \"high\"\n      },\n      \"typical_workload\": \"medium\"\n    },\n    \"analyst\": {\n      \"description\": \"Analyzes requirements and breaks down tasks\",\n      \"required_capabilities\": [\"analysis\", \"communication\", \"documentation\"],\n      \"preferred_model_traits\": {\n        \"thinking_depth\": \"high\",\n        \"context_understanding\": \"high\",\n        \"creativity\": \"medium\"\n      },\n      \"typical_workload\": \"medium\"\n    }\n  }\n}\n\nModel Scores Database (.trae/data/model_scores.json)\n{\n  \"last_evaluation\": \"2026-02-12T10:30:00Z\",\n  \"evaluation_interval\": 3600,\n  \"scores\": {\n    \"gpt-4\": {\n      \"dimensions\": {\n        \"response_speed\": 8.5,\n        \"response_frequency\": 9.0,\n        \"thinking_depth\": 9.5,\n        \"multi_threading\": 
7.0,\n        \"code_quality\": 9.0,\n        \"creativity\": 8.5,\n        \"reliability\": 9.5,\n        \"context_understanding\": 9.0\n      },\n      \"overall_score\": 8.75,\n      \"evaluation_count\": 42,\n      \"role_fit_history\": {\n        \"architect\": 9.2,\n        \"developer\": 8.8,\n        \"reviewer\": 9.0\n      }\n    }\n  }\n}\n\nBest Practices\nModel Selection\nMatch model capabilities to task requirements\nConsider cost-effectiveness for routine tasks\nReserve high-capability models for complex reasoning\nDistribute workload to prevent bottlenecks\nCommunication\nKeep messages concise and clear\nUse structured formats for inter-model communication\nHerald should batch non-urgent updates\nEscalate critical issues immediately\nPerformance Optimization\nCache frequently used context\nBatch similar requests when possible\nMonitor quota usage proactively\nMaintain backup models for critical roles\nQuality Assurance\nAlways conduct review meetings\nUpdate scores after each task\nLearn from failures systematically\nContinuously refine role definitions\nError Handling\nProvider Failures\nRetry with exponential backoff\nSwitch to backup provider\nNotify team of delays\nUpdate model availability status\nTask Failures\nCapture detailed error context\nAnalyze root cause with team\nPropose remediation strategies\nConsult user for major changes\nCommunication Failures\nHerald implements heartbeat checks\nFallback to direct model-to-model communication\nReassign herald if unresponsive\nLog all communication issues\nOutput Format\n\nFinal deliverables include:\n\nComplete task execution report\nTeam composition and role assignments\nIndividual model performance metrics\nCost breakdown and usage statistics\nUpdated model capability scores\nLessons learned and recommendations\nSkill Structure & Components\nDirectory Structure\n.trae/skills/teamwork/\n├── SKILL.md                    # Main skill definition (this file)\n├── scripts/                    # 
Execution scripts\n│   ├── init.js                # Initialization and configuration loader\n│   ├── config-manager.js      # Provider and model configuration management\n│   ├── score-manager.js       # Model performance score management\n│   ├── team-coordinator.js    # Team assembly and task coordination\n│   └── herald.js              # Communication and message relay system\n├── templates/                  # Document templates\n│   ├── task-report.md         # Task execution report template\n│   ├── meeting-minutes.md     # Meeting minutes template\n│   ├── failure-report.md      # Failure analysis report template\n│   └── evaluation-form.md     # Model evaluation form template\n├── utils/                      # Utility functions\n│   ├── index.js               # Utility exports\n│   ├── helpers.js             # General helper functions\n│   ├── logger.js              # Logging system\n│   ├── template-renderer.js   # Template rendering engine\n│   └── errors.js              # Custom error classes\n└── data/                       # Skill runtime data\n    └── (generated at runtime)\n\nScripts Reference\ninit.js - Initialization Module\n\nPurpose: Handles skill initialization and configuration loading.\n\nKey Functions:\n\nensureDirectories() - Create required directories\ncheckConfiguration() - Verify configuration status\ninitializeDefaultRoles() - Create default role definitions\ninitializeEmptyScores() - Initialize empty scores database\ninitializeEmptyProviders() - Initialize empty providers config\nneedsInitialization() - Check if initialization is required\nreadJSON(filePath) - Read JSON configuration file\nwriteJSON(filePath, data) - Write JSON configuration file\n\nUsage:\n\nconst init = require('./scripts/init.js');\n\n// Check if initialization needed\nif (init.needsInitialization()) {\n  init.initializeDefaultRoles();\n  init.initializeEmptyScores();\n  init.initializeEmptyProviders();\n}\n\nconfig-manager.js - Configuration Management\n\nPurpose: 
Manage provider and model configurations.\n\nKey Functions:\n\naddProvider(config, providerInfo) - Add new provider\naddModel(config, providerName, modelInfo) - Add model to provider\nremoveModel(config, providerName, modelName) - Remove model\nremoveProvider(config, providerName) - Remove provider\nupdateModelPricing(config, providerName, modelName, pricingInfo) - Update pricing\nsetHostModel(config, providerName, modelName) - Set host model\nsetBudget(config, budgetInfo) - Set budget limits\ngetAvailableModels(config) - Get list of available models\ngetModelStatus(model) - Get model availability status\ndisplayConfiguration(config) - Display current configuration\n\nUsage:\n\nconst configManager = require('./scripts/config-manager.js');\nconst config = init.readJSON(init.PROVIDERS_FILE);\n\n// Add new provider\nconfigManager.addProvider(config, {\n  name: 'openai',\n  api_key: '${OPENAI_API_KEY}',\n  base_url: 'https://api.openai.com/v1'\n});\n\n// Add model with subscription pricing\nconfigManager.addModel(config, 'openai', {\n  name: 'gpt-4',\n  pricing_model: 'subscription',\n  subscription_cost: 20.00,\n  valid_from: '2026-02-01',\n  valid_until: '2026-03-01',\n  capabilities: ['reasoning', 'coding']\n});\n\nscore-manager.js - Performance Score Management\n\nPurpose: Manage model performance evaluation scores.\n\nKey Functions:\n\ninitializeModelScore(scores, modelName, provider) - Initialize model scores\nupdateModelScore(scores, modelName, dimension, newScore, evaluator) - Update dimension score\ncalculateOverallScore(dimensions, weights) - Calculate weighted overall score\nupdateRoleFit(scores, modelName, roleName, fitScore) - Update role fit score\ngetTopModelsForRole(scores, roleName, topN) - Get top models for a role\ngetModelsByCapability(scores, capability, minScore) - Get models by capability\nrecordEvaluation(scores, evaluation) - Record complete evaluation\ndisplayScores(scores) - Display all model scores\n\nUsage:\n\nconst scoreManager = 
require('./scripts/score-manager.js');\n\n// Record evaluation\nscoreManager.recordEvaluation(scores, {\n  evaluator: 'gpt-4',\n  evaluatee: 'claude-3',\n  task_id: 'task-123',\n  role_played: 'developer',\n  scores: {\n    response_speed: 8,\n    thinking_depth: 9,\n    code_quality: 9\n  },\n  role_fit: 'excellent'\n});\n\n// Get top models for architect role\nconst topArchitects = scoreManager.getTopModelsForRole(scores, 'architect', 3);\n\nteam-coordinator.js - Team Coordination\n\nPurpose: Coordinate team assembly and task execution.\n\nKey Class: TeamCoordinator\n\nMethods:\n\nload() - Load configurations\ngetAvailableModels() - Get available models list\nselectHerald() - Select fastest model as herald\nassignRoles(requiredRoles) - Assign roles to models\ncalculateCombinedScore(model, roleFit) - Calculate selection score\ncreateTaskPlan(userRequest) - Create task execution plan\ngenerateMeetingAgenda(meetingType) - Generate meeting agenda\ngenerateEvaluationForms() - Generate peer evaluation forms\ngenerateReport() - Generate task report\n\nUsage:\n\nconst TeamCoordinator = require('./scripts/team-coordinator.js');\n\nconst coordinator = new TeamCoordinator();\ncoordinator.load();\n\n// Select herald\nconst herald = coordinator.selectHerald();\n\n// Assign roles\nconst assignments = coordinator.assignRoles(['architect', 'developer', 'tester']);\n\n// Create task plan\nconst plan = coordinator.createTaskPlan('Build a REST API');\n\nherald.js - Communication System\n\nPurpose: Manage inter-model communication and coordination.\n\nKey Class: Herald\n\nMethods:\n\ninitializeTeam(team) - Initialize team status tracking\nbroadcast(message, excludeSender) - Broadcast message to all\nsendDirectMessage(to, message, from) - Send direct message\nupdateProgress(model, subtaskId, progress, status) - Update task progress\ngetTeamStatus() - Get current team status\ncheckTimeouts() - Check for timeout conditions\npollTeam() - Request status from all 
members\nreportToHost(status) - Send status report to host\nnotifyFailure(model, error, context) - Notify failure\nnotifyCompletion(model, result) - Notify completion\ngetOverallProgress() - Get overall task progress\n\nUsage:\n\nconst Herald = require('./scripts/herald.js');\n\nconst herald = new Herald('gpt-3.5-turbo', 'openai');\nherald.initializeTeam(team);\n\n// Broadcast update\nherald.broadcast({ type: 'task_update', content: 'Phase 1 complete' });\n\n// Check progress\nconst progress = herald.getOverallProgress();\n\nTemplates Reference\ntask-report.md - Task Execution Report\n\nPurpose: Document complete task execution details.\n\nVariables:\n\ntask_id - Unique task identifier\ntimestamp - Report generation time\nstatus - Task status (completed/failed/in_progress)\nsummary - Executive summary\nteam_members - Array of team member details\nphases - Array of execution phases\nmodel_metrics - Performance metrics per model\ntotal_cost - Total execution cost\ndeliverables - Array of deliverables\nlessons - Lessons learned\nrecommendations - Recommendations\nscore_updates - Model score updates\n\nUsage:\n\nconst { renderTemplateFromFile } = require('./utils/template-renderer.js');\n\nconst report = renderTemplateFromFile('task-report.md', {\n  task_id: 'task-123',\n  timestamp: new Date().toISOString(),\n  status: 'completed',\n  summary: 'Successfully implemented REST API',\n  team_members: [...],\n  phases: [...]\n});\n\nmeeting-minutes.md - Meeting Documentation\n\nPurpose: Document team meeting discussions and decisions.\n\nVariables:\n\nmeeting_id - Unique meeting identifier\nmeeting_type - Type of meeting\ndate - Meeting date\nduration - Meeting duration\nparticipants - Array of participants\nagenda_items - Meeting agenda\nvoting_results - Voting results (if applicable)\naction_items - Action items from meeting\nnext_steps - Next steps to take\nfailure-report.md - Failure Analysis\n\nPurpose: Document and analyze task failures.\n\nVariables:\n\ntask_id - 
Failed task identifier\nfailure_time - Time of failure\nfailure_type - Type of failure\nseverity - Failure severity\ntimeline - Timeline of events\nprimary_cause - Root cause\ncontributing_factors - Contributing factors\nrecovery_actions - Actions taken for recovery\nrecommendations - Recommendations to prevent recurrence\nevaluation-form.md - Model Evaluation\n\nPurpose: Document peer model evaluations.\n\nVariables:\n\nevaluator_model - Evaluating model\nevaluatee_model - Model being evaluated\ntask_id - Related task\nrole_played - Role in task\nresponse_speed through context_understanding - Dimension scores\nrole_fit - Overall role fit assessment\nstrengths - Model strengths\nimprovements - Areas for improvement\nUtilities Reference\nhelpers.js - General Utilities\n\nFunctions:\n\ngenerateId(prefix) - Generate unique identifier\nformatDate(date) - Format date to ISO string\nformatDuration(ms) - Format milliseconds to readable duration\ncalculateCost(model, inputTokens, outputTokens) - Calculate API cost\ndeepClone(obj) - Deep clone object\nmergeObjects(target, source) - Deep merge objects\nretryWithBackoff(fn, maxRetries, delay) - Retry with exponential backoff\nchunkArray(array, size) - Split array into chunks\ngroupBy(array, key) - Group array by key\nsortBy(array, key, order) - Sort array by key\nuniqueBy(array, key) - Remove duplicates by key\nlogger.js - Logging System\n\nClasses: Logger\n\nLog Levels: DEBUG, INFO, WARN, ERROR\n\nMethods:\n\ndebug(message, data) - Log debug message\ninfo(message, data) - Log info message\nwarn(message, data) - Log warning message\nerror(message, data) - Log error message\nsetLevel(level) - Set log level\nsetLogFile(filePath) - Set log file path\n\nUsage:\n\nconst { createLogger, LOG_LEVELS } = require('./utils/logger.js');\n\nconst logger = createLogger('teamwork', { \n  level: LOG_LEVELS.DEBUG,\n  console: true \n});\n\nlogger.info('Task started', { task_id: 'task-123' });\n\nerrors.js - Custom Errors\n\nError 
Classes:\n\nValidationError - Input validation errors\nConfigurationError - Configuration errors\nModelNotFoundError - Model not found errors\nProviderNotFoundError - Provider not found errors\nTaskExecutionError - Task execution errors\nTimeoutError - Timeout errors\nBudgetExceededError - Budget exceeded errors\nQuotaExceededError - Quota exceeded errors\nHeraldError - Herald communication errors\n\nFunctions:\n\nhandleError(error, logger) - Standardized error handling\nisRecoverable(error) - Check if error is recoverable\nAPI Reference\nQuick Start\n// 1. Initialize skill\nconst init = require('./scripts/init.js');\nif (init.needsInitialization()) {\n  // Run interactive setup\n  init.initializeDefaultRoles();\n  init.initializeEmptyScores();\n  init.initializeEmptyProviders();\n}\n\n// 2. Configure providers\nconst configManager = require('./scripts/config-manager.js');\nconst config = init.readJSON(init.PROVIDERS_FILE);\n\nconfigManager.addProvider(config, { name: 'openai' });\nconfigManager.addModel(config, 'openai', {\n  name: 'gpt-4',\n  pricing_model: 'pay_per_use',\n  input_cost_per_1k: 0.03,\n  output_cost_per_1k: 0.06,\n  capabilities: ['reasoning', 'coding']\n});\nconfigManager.setHostModel(config, 'openai', 'gpt-4');\ninit.writeJSON(init.PROVIDERS_FILE, config);\n\n// 3. Create team and execute task\nconst TeamCoordinator = require('./scripts/team-coordinator.js');\nconst coordinator = new TeamCoordinator();\ncoordinator.load();\n\nconst herald = coordinator.selectHerald();\nconst team = coordinator.assignRoles(['architect', 'developer', 'tester']);\nconst plan = coordinator.createTaskPlan('Build REST API');\n\n// 4. Execute with herald coordination\nconst Herald = require('./scripts/herald.js');\nconst heraldInstance = new Herald(herald.model, herald.provider);\nheraldInstance.initializeTeam(team);\n\n// 5. 
Record evaluations and update scores\nconst scoreManager = require('./scripts/score-manager.js');\nconst scores = init.readJSON(init.SCORES_FILE);\n\nscoreManager.recordEvaluation(scores, {\n  evaluator: 'gpt-4',\n  evaluatee: 'claude-3',\n  task_id: plan.task_id,\n  role_played: 'developer',\n  scores: { response_speed: 8, thinking_depth: 9, code_quality: 9 },\n  role_fit: 'excellent'\n});\n\ninit.writeJSON(init.SCORES_FILE, scores);\n\nVersion History\nv1.0.0 (2026-02-12): Initial release with full feature set\nMulti-provider support\nThree pricing models\n8-dimension performance evaluation\nHerald communication system\nComplete workflow management\nTemplate-based reporting"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/ChenXinBest/teamwork",
    "publisherUrl": "https://clawhub.ai/ChenXinBest/teamwork",
    "owner": "ChenXinBest",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/teamwork",
    "downloadUrl": "https://openagent3.xyz/downloads/teamwork",
    "agentUrl": "https://openagent3.xyz/skills/teamwork/agent",
    "manifestUrl": "https://openagent3.xyz/skills/teamwork/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/teamwork/agent.md"
  }
}