{
  "schemaVersion": "1.0",
  "item": {
    "slug": "symbiont",
    "name": "Symbiont",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/jaschadub/symbiont",
    "canonicalUrl": "https://clawhub.ai/jaschadub/symbiont",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/symbiont",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=symbiont",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=symbiont",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=symbiont",
        "contentDisposition": "attachment; filename=\"symbiont-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/symbiont"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/symbiont",
    "agentPageUrl": "https://openagent3.xyz/skills/symbiont/agent",
    "manifestUrl": "https://openagent3.xyz/skills/symbiont/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/symbiont/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Symbiont Agent Development Skills Guide",
        "body": "Purpose: This guide helps AI assistants quickly build secure, compliant Symbiont agents following best practices.\n\nFor Full Documentation: See DSL Guide, DSL Specification, and Example Agents"
      },
      {
        "title": "What Makes Symbiont Unique",
        "body": "ORGA Reasoning Loop: Typestate-enforced Observe-Reason-Gate-Act cycle with compile-time phase safety\nCedar Policy Authorization: Formal policy evaluation via cedar-policy crate's Authorizer::is_authorized()\nKnowledge Bridge: Context-aware reasoning with vector-backed retrieval and automatic learning persistence\nDurable Journal: All 7 loop event types recorded for crash recovery and replay without re-calling the LLM\nZero-Trust Security: All inputs untrusted by default, explicit policies required\nPolicy-as-Code: Declarative security rules enforced at runtime\nMulti-Tier Sandboxing: Docker → gVisor → Firecracker isolation\nEnterprise Compliance: HIPAA, SOC2, GDPR patterns built-in\nCryptographic Verification: SchemaPin for MCP tools, AgentPin for agent identity, Ed25519 signatures\nWebhook DX: Signature verification middleware with GitHub/Stripe/Slack presets\nPersistent Memory: Markdown-backed agent memory with retention and compaction"
      },
      {
        "title": "Minimal Viable Agent",
        "body": "metadata {\n    version = \"1.0.0\"\n    author = \"Your Name\"\n    description = \"Brief description of what this agent does\"\n    tags = [\"category\", \"use-case\"]\n}\n\nagent my_agent(input: String) -> String {\n    capabilities = [\"process_data\", \"validate_input\"]\n\n    policy security_policy {\n        // Allow specific operations\n        allow: [\"read_input\", \"write_output\"] if input.length() < 10000\n\n        // Deny dangerous operations\n        deny: [\"network_access\", \"file_system\"] if true\n\n        // Require conditions\n        require: {\n            input_validation: true,\n            output_sanitization: true\n        }\n\n        // Audit important actions\n        audit: {\n            log_level: \"info\",\n            include_input: false,  // Don't log sensitive data\n            include_output: true\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\", timeout = 30000 {\n        try {\n            // Validate input\n            if input.is_empty() {\n                return error(\"Input cannot be empty\");\n            }\n\n            // Process data\n            let result = process(input);\n\n            // Return result\n            return result;\n\n        } catch (error) {\n            log(\"ERROR\", \"Processing failed: \" + error.message);\n            return error(\"Processing failed\");\n        }\n    }\n}\n\nfn process(data: String) -> String {\n    // Your processing logic here\n    return data.to_uppercase();\n}"
      },
      {
        "title": "Agentic Reasoning Loop (ORGA Cycle)",
        "body": "The reasoning loop drives autonomous agent behavior through a typestate-enforced cycle:\n\nObserve — Collect results from previous tool executions\nReason — LLM produces proposed actions (tool calls or text responses)\nGate — Policy engine evaluates each proposed action\nAct — Approved actions are dispatched to tool executors"
      },
      {
        "title": "Minimal Reasoning Loop",
        "body": "use std::sync::Arc;\nuse symbi_runtime::reasoning::{\n    ReasoningLoopRunner, LoopConfig, Conversation, ConversationMessage,\n    circuit_breaker::CircuitBreakerRegistry,\n    context_manager::DefaultContextManager,\n    executor::DefaultActionExecutor,\n    loop_types::BufferedJournal,\n    policy_bridge::DefaultPolicyGate,\n};\nuse symbi_runtime::types::AgentId;\n\nlet runner = ReasoningLoopRunner {\n    provider: Arc::new(my_inference_provider),\n    policy_gate: Arc::new(DefaultPolicyGate::permissive()),\n    executor: Arc::new(DefaultActionExecutor::default()),\n    context_manager: Arc::new(DefaultContextManager::default()),\n    circuit_breakers: Arc::new(CircuitBreakerRegistry::default()),\n    journal: Arc::new(BufferedJournal::new(1000)),\n    knowledge_bridge: None, // Optional: add KnowledgeBridge for RAG\n};\n\nlet mut conv = Conversation::with_system(\"You are a helpful agent.\");\nconv.push(ConversationMessage::user(\"What is 6 * 7?\"));\n\nlet result = runner\n    .run(AgentId::new(), conv, LoopConfig::default())\n    .await;"
      },
      {
        "title": "Phase Transitions (Compile-Time Safe)",
        "body": "Invalid transitions are caught at compile time:\n\nReasoning → PolicyCheck → ToolDispatching → Observing → Reasoning (loop)\n                                                      → Complete (exit)"
      },
      {
        "title": "Journal Events",
        "body": "The journal records all 7 event types for durable execution:\n\nEventWhenPurposeStartedLoop beginConfiguration snapshotReasoningCompleteAfter LLM response, before policyCrash recovery without re-calling LLMPolicyEvaluatedAfter policy checkAction counts, denied countsToolsDispatchedAfter tool executionTool count, wall-clock durationObservationsCollectedAfter collecting resultsObservation countTerminatedLoop endReason, iterations, usage, durationRecoveryTriggeredOn tool failure recoveryStrategy, error context"
      },
      {
        "title": "Cedar Policy Gate (Feature: cedar)",
        "body": "Formal authorization using the cedar-policy crate:\n\nuse symbi_runtime::reasoning::cedar_gate::{CedarPolicyGate, CedarPolicy};\n\nlet gate = CedarPolicyGate::deny_by_default();\n\n// Cedar policies use entity types: Agent (principal), Action (action), Resource (resource)\ngate.add_policy(CedarPolicy {\n    name: \"allow_respond\".into(),\n    source: r#\"permit(principal, action == Action::\"respond\", resource);\"#.into(),\n    active: true,\n}).await;\n\ngate.add_policy(CedarPolicy {\n    name: \"deny_search\".into(),\n    source: r#\"forbid(principal, action == Action::\"tool_call::search\", resource);\"#.into(),\n    active: true,\n}).await;\n\nAction mapping: tool_call::<name>, respond, delegate::<target>, terminate.\n\nCedar semantics enforced: forbid overrides permit, default deny, skip-on-error."
      },
      {
        "title": "Knowledge Bridge (Optional)",
        "body": "Add context-aware reasoning with vector-backed retrieval:\n\nuse symbi_runtime::reasoning::KnowledgeBridge;\n\nlet bridge = Arc::new(KnowledgeBridge::new(knowledge_config));\n\nlet runner = ReasoningLoopRunner {\n    // ... other fields ...\n    knowledge_bridge: Some(bridge),\n};\n\nThe bridge injects relevant context before each reasoning step and persists learnings after loop completion."
      },
      {
        "title": "1. Data Processing Agent (Read/Transform/Write)",
        "body": "policy data_processing_policy {\n    // Allow data operations with size limits\n    allow: [\n        \"read_data\",\n        \"transform_data\",\n        \"write_output\"\n    ] if request.data_size < 10_000_000  // 10MB limit\n\n    // Deny dangerous operations\n    deny: [\n        \"execute_code\",\n        \"spawn_process\",\n        \"network_access\"\n    ] if true\n\n    // Require validation\n    require: {\n        input_validation: true,\n        output_sanitization: true,\n        rate_limiting: \"100/minute\"\n    }\n\n    // Audit with PII protection\n    audit: {\n        log_level: \"info\",\n        include_input: false,      // Protect PII\n        include_output: false,     // Protect PII\n        include_metadata: true,\n        retention_days: 90\n    }\n}"
      },
      {
        "title": "2. API Integration Agent (External Calls)",
        "body": "policy api_integration_policy {\n    // Allow specific endpoints only\n    allow: [\n        \"https_request\"\n    ] if request.url.starts_with(\"https://api.trusted-service.com/\")\n\n    // Deny everything else\n    deny: [\n        \"http_request\",           // Only HTTPS\n        \"file_access\",\n        \"database_access\"\n    ] if true\n\n    // Require security measures\n    require: {\n        tls_verification: true,\n        api_key_rotation: \"30_days\",\n        rate_limiting: \"1000/hour\",\n        timeout: \"5000ms\"\n    }\n\n    // Audit all API calls\n    audit: {\n        log_level: \"info\",\n        include_request_headers: true,\n        include_response_status: true,\n        include_latency: true,\n        alert_on_errors: true\n    }\n}"
      },
      {
        "title": "3. Security Scanning Agent (Audit/Compliance)",
        "body": "policy security_scanner_policy {\n    // Allow read-only scanning\n    allow: [\n        \"read_files\",\n        \"analyze_code\",\n        \"check_dependencies\",\n        \"validate_configs\"\n    ] if scan.depth <= 5  // Limit recursion\n\n    // Deny modifications\n    deny: [\n        \"write_files\",\n        \"modify_permissions\",\n        \"execute_code\"\n    ] if true\n\n    // Require strict controls\n    require: {\n        signature_verification: true,\n        checksum_validation: true,\n        sandbox_tier: \"Tier2\",  // gVisor isolation\n        max_scan_time: \"300000ms\"  // 5 minutes\n    }\n\n    // Audit findings\n    audit: {\n        log_level: \"warning\",\n        include_findings: true,\n        include_risk_scores: true,\n        alert_on_critical: true,\n        compliance_tags: [\"HIPAA\", \"SOC2\"]\n    }\n}"
      },
      {
        "title": "4. Workflow Orchestration Agent (Multi-Step)",
        "body": "policy orchestration_policy {\n    // Allow agent coordination\n    allow: [\n        \"invoke_agent\",\n        \"message_passing\",\n        \"state_management\"\n    ] if orchestration.depth < 10  // Prevent infinite loops\n\n    // Deny resource-intensive ops\n    deny: [\n        \"spawn_unlimited_agents\",\n        \"recursive_orchestration\"\n    ] if true\n\n    // Require controls\n    require: {\n        max_concurrent_agents: 50,\n        total_timeout: \"600000ms\",  // 10 minutes\n        failure_recovery: \"retry_with_backoff\",\n        circuit_breaker: true\n    }\n\n    // Audit workflow\n    audit: {\n        log_level: \"info\",\n        include_workflow_graph: true,\n        include_timing: true,\n        include_dependencies: true,\n        trace_id: true\n    }\n}"
      },
      {
        "title": "Sandbox Tier Selection Guide",
        "body": "TierTechnologyUse CasePerformanceSecurityOverheadTier1DockerGeneral workloadsFastGoodLow (~100ms)Tier2gVisorUntrusted codeMediumHighMedium (~500ms)Tier3FirecrackerMulti-tenant isolationSlowerMaximumHigh (~2s)NativeProcess onlyDevelopment ONLYFastestNoneMinimal\n\nSelection Guide:\n\nTier1 (Docker): Default choice for most agents\nTier2 (gVisor): Processing external data, user-provided code\nTier3 (Firecracker): Highly sensitive, regulatory compliance\nNative: NEVER use in production (development/testing only)"
      },
      {
        "title": "Type System",
        "body": "// Primitives\nlet name: String = \"value\";\nlet count: Integer = 42;\nlet price: Float = 19.99;\nlet active: Boolean = true;\nlet data: Bytes = [0x01, 0x02, 0x03];\n\n// Collections\nlet tags: Array<String> = [\"tag1\", \"tag2\"];\nlet config: Map<String, String> = {\"key\": \"value\"};\nlet unique: Set<Integer> = {1, 2, 3};\n\n// Security-Aware Types\nlet sensitive: EncryptedData<String> = encrypt(\"secret\");\nlet private_data: PrivateData<Integer> = private(123);\nlet verified: VerifiableResult<String> = sign(\"data\");\n\n// Optional Types\nlet optional: Optional<String> = Some(\"value\");\nlet none_value: Optional<String> = None;"
      },
      {
        "title": "Control Flow",
        "body": "// If/Else\nif condition {\n    // true branch\n} else if other_condition {\n    // else if branch\n} else {\n    // false branch\n}\n\n// Match (Pattern Matching)\nmatch value {\n    Some(x) => process(x),\n    None => default_value,\n    Error(e) => handle_error(e)\n}\n\n// Loops\nfor item in collection {\n    process(item);\n}\n\nwhile condition {\n    do_work();\n}\n\n// Error Handling\ntry {\n    risky_operation();\n} catch (error) {\n    log(\"ERROR\", error.message);\n    return error(\"Operation failed\");\n}"
      },
      {
        "title": "Policy Language",
        "body": "policy policy_name {\n    // Allow operations with conditions\n    allow: [\"operation1\", \"operation2\"] if condition\n    allow: \"operation3\" if complex.condition && other.check\n\n    // Deny operations\n    deny: [\"dangerous_op\"] if true\n    deny: \"risky_op\" if environment == \"production\"\n\n    // Required conditions\n    require: {\n        authentication: true,\n        authorization: \"role:admin\",\n        encryption: \"AES-256-GCM\",\n        rate_limit: \"100/second\"\n    }\n\n    // Audit specification\n    audit: {\n        log_level: \"info\" | \"warning\" | \"error\",\n        include_input: boolean,\n        include_output: boolean,\n        retention_days: integer,\n        compliance_tags: array<string>\n    }\n}"
      },
      {
        "title": "With Block (Execution Context)",
        "body": "with\n    memory = \"ephemeral\" | \"persistent\",\n    privacy = \"strict\" | \"medium\" | \"low\",\n    security = \"high\" | \"medium\" | \"low\",\n    sandbox = \"Tier1\" | \"Tier2\" | \"Tier3\",\n    timeout = milliseconds,\n    requires = [\"clearance:level5\", \"approval:manager\"]\n{\n    // Agent implementation\n}"
      },
      {
        "title": "1. Secrets Management (Vault/OpenBao)",
        "body": "agent secure_api_caller(endpoint: String) -> String {\n    policy secret_access {\n        allow: [\"read_secret\"] if secret.path.starts_with(\"application/\")\n        deny: [\"write_secret\", \"delete_secret\"] if true\n\n        require: {\n            vault_auth: true,\n            token_rotation: \"1_hour\"\n        }\n\n        audit: {\n            log_level: \"warning\",\n            include_secret_path: true,\n            include_secret_value: false  // NEVER log secrets\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\" {\n        // Reference secrets using vault:// protocol\n        let api_key = vault://application/api/key;\n        let api_secret = vault://application/api/secret;\n\n        // Use secrets in API call\n        let response = http.post(endpoint, {\n            headers: {\n                \"Authorization\": \"Bearer \" + api_key,\n                \"X-API-Secret\": api_secret\n            }\n        });\n\n        return response.body;\n    }\n}"
      },
      {
        "title": "2. MCP Tool Integration (Cryptographic Verification)",
        "body": "agent mcp_tool_user(tool_name: String, input: String) -> String {\n    capabilities = [\"invoke_mcp_tool\"]\n\n    policy mcp_security {\n        // Only allow verified tools\n        allow: [\"mcp_invoke\"] if tool.verified == true\n        deny: [\"mcp_invoke\"] if tool.signature_invalid\n\n        require: {\n            schema_pin_verification: true,  // ECDSA P-256\n            tofu_trust_model: true,         // Trust-On-First-Use\n            tool_review_required: false     // Auto for signed tools\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_tool_signature: true,\n            include_tool_schema: true\n        }\n    }\n\n    with security = \"high\" {\n        // Discover and invoke MCP tool\n        let tool = mcp.discover(tool_name);\n\n        // Verify cryptographic signature\n        if !tool.verify_signature() {\n            return error(\"Tool signature verification failed\");\n        }\n\n        // Invoke tool\n        let result = mcp.invoke(tool, input);\n        return result;\n    }\n}"
      },
      {
        "title": "3. HTTP Webhook Processing",
        "body": "agent webhook_processor(request: HttpRequest) -> HttpResponse {\n    capabilities = [\"process_webhook\", \"validate_signature\"]\n\n    policy webhook_policy {\n        allow: [\"parse_json\", \"validate_data\"] if request.size < 1_000_000\n        deny: [\"execute_code\", \"file_access\"] if true\n\n        require: {\n            signature_verification: true,\n            rate_limiting: \"1000/minute\",\n            timeout: \"5000ms\"\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_request_id: true,\n            include_source_ip: true,\n            alert_on_invalid_signature: true\n        }\n    }\n\n    with memory = \"ephemeral\", timeout = 5000 {\n        // Verify webhook signature (e.g., GitHub, Stripe)\n        let signature = request.headers[\"X-Webhook-Signature\"];\n        let secret = vault://webhooks/secret;\n\n        if !verify_hmac_sha256(request.body, secret, signature) {\n            return HttpResponse(401, \"Invalid signature\");\n        }\n\n        // Parse and process webhook\n        let data = json.parse(request.body);\n        let result = process_event(data);\n\n        return HttpResponse(200, json.stringify(result));\n    }\n}"
      },
      {
        "title": "4. Scheduled Execution",
        "body": "metadata {\n    schedule = \"0 */6 * * *\"  // Every 6 hours (cron format)\n}\n\nagent scheduled_cleanup() -> String {\n    capabilities = [\"cleanup_data\", \"archival\"]\n\n    policy cleanup_policy {\n        allow: [\"read_old_data\", \"archive\", \"delete\"] if data.age > 90_days\n        deny: [\"delete\"] if data.age <= 90_days\n\n        require: {\n            backup_verification: true,\n            retention_check: true\n        }\n\n        audit: {\n            log_level: \"warning\",\n            include_deleted_count: true,\n            include_archived_count: true\n        }\n    }\n\n    with memory = \"persistent\", timeout = 300000 {\n        let old_data = query_old_data(90);\n        let archived_count = archive_data(old_data);\n        let deleted_count = delete_archived_data(old_data);\n\n        return \"Archived: \" + archived_count + \", Deleted: \" + deleted_count;\n    }\n}"
      },
      {
        "title": "5. Persistent Memory (DSL Configuration)",
        "body": "// Top-level memory block — configures Markdown-backed agent memory\nmemory agent_memory {\n    store    markdown           // Storage backend (markdown only for now)\n    path     \"data/agents\"     // Root directory for memory files\n    retention 90d              // How long daily logs are kept\n    search {\n        vector_weight  0.7     // Semantic similarity weight\n        keyword_weight 0.3     // BM25 keyword match weight\n    }\n}\n\nMemory files are human-readable Markdown stored at data/agents/{agent_id}/memory.md with sections for Facts, Procedures, and Learned Patterns. Daily interaction logs are appended to logs/{date}.md and compacted based on retention policy.\n\nREPL Commands:\n\n:memory inspect <agent-id> — Display agent's memory.md\n:memory compact <agent-id> — Flush daily logs, remove expired entries\n:memory purge <agent-id> — Delete all memory for an agent"
      },
      {
        "title": "6. Webhook Endpoints (DSL Configuration)",
        "body": "// Top-level webhook block — defines verified webhook endpoints\nwebhook github_events {\n    path     \"/hooks/github\"\n    provider github                              // Preset: github, stripe, slack, custom\n    secret   \"secret://vault/github-webhook-secret\"  // HMAC secret (supports vault refs)\n    agent    code_review_agent                   // Route to this agent\n    filter {\n        json_path \"$.action\"\n        equals    \"opened\"                       // Only process \"opened\" events\n    }\n}\n\nProvider presets configure signature verification automatically:\n\ngithub: X-Hub-Signature-256 header, sha256= prefix, HMAC-SHA256\nstripe: Stripe-Signature header, HMAC-SHA256\nslack: X-Slack-Signature header, v0= prefix, HMAC-SHA256\ncustom: X-Signature header, HMAC-SHA256\n\nAll signatures are verified using constant-time comparison before the request reaches the agent handler. Invalid signatures return HTTP 401.\n\nREPL Commands:\n\n:webhook list — Show configured webhook definitions"
      },
      {
        "title": "7. Persistent Memory & RAG Engine",
        "body": "agent knowledge_assistant(query: String) -> String {\n    capabilities = [\"semantic_search\", \"rag_retrieval\", \"synthesis\"]\n\n    policy knowledge_policy {\n        allow: [\n            \"vector_search\",\n            \"knowledge_retrieval\",\n            \"context_synthesis\"\n        ] if query.length() < 1000\n\n        deny: [\"knowledge_modification\"] if true\n\n        require: {\n            embedding_model: \"all-MiniLM-L6-v2\",\n            similarity_threshold: 0.7,\n            max_results: 10\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_query: true,\n            include_relevance_scores: true\n        }\n    }\n\n    with memory = \"persistent\", security = \"medium\" {\n        // Semantic search in vector database\n        let context = rag.search(query, {\n            top_k: 5,\n            similarity_threshold: 0.7\n        });\n\n        // Synthesize response\n        let response = synthesize(query, context);\n\n        // Store interaction for future learning\n        memory.store({\n            query: query,\n            response: response,\n            timestamp: now()\n        });\n\n        return response;\n    }\n}"
      },
      {
        "title": "8. Inter-Agent Communication",
        "body": "agent coordinator(task: String) -> String {\n    capabilities = [\"message_passing\", \"agent_coordination\"]\n\n    policy coordination_policy {\n        allow: [\n            \"send_message\",\n            \"receive_message\",\n            \"invoke_agent\"\n        ] if coordination.depth < 5\n\n        deny: [\"broadcast\"] if true  // Prevent message storms\n\n        require: {\n            message_encryption: true,  // AES-256-GCM\n            message_signing: true,     // Ed25519\n            max_concurrent_agents: 10\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_message_flow: true,\n            include_agent_graph: true\n        }\n    }\n\n    with memory = \"persistent\" {\n        // Invoke specialized agent\n        let validator_response = agent.invoke(\"data_validator\", {\n            data: task\n        });\n\n        // Send encrypted message to another agent\n        agent.send_message(\"processor_agent\", {\n            type: \"process_request\",\n            payload: validator_response,\n            priority: \"high\"\n        });\n\n        // Wait for response\n        let result = agent.receive_message(timeout = 10000);\n\n        return result.payload;\n    }\n}"
      },
      {
        "title": "Pattern 1: Data Validation Pipeline",
        "body": "agent data_validator(data: String, schema: String) -> ValidationResult {\n    capabilities = [\"schema_validation\", \"data_quality_check\"]\n\n    policy validation_policy {\n        allow: [\"parse_schema\", \"validate_data\", \"quality_scoring\"]\n        deny: [\"modify_data\", \"execute_code\"]\n        require: {\n            max_data_size: \"10MB\",\n            timeout: \"5000ms\"\n        }\n        audit: {\n            log_level: \"warning\",\n            include_validation_errors: true\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\" {\n        try {\n            // Parse schema\n            let parsed_schema = json.parse(schema);\n\n            // Validate against schema\n            let validation = validate(data, parsed_schema);\n\n            // Calculate quality score\n            let quality_score = calculate_quality(data);\n\n            return ValidationResult {\n                valid: validation.success,\n                errors: validation.errors,\n                quality_score: quality_score,\n                recommendations: generate_recommendations(validation)\n            };\n\n        } catch (error) {\n            return ValidationResult {\n                valid: false,\n                errors: [error.message],\n                quality_score: 0.0,\n                recommendations: []\n            };\n        }\n    }\n}"
      },
      {
        "title": "Pattern 2: Format Converter",
        "body": "agent format_converter(data: String, from_format: String, to_format: String) -> String {\n    capabilities = [\"parse_format\", \"transform_data\", \"serialize_format\"]\n\n    policy conversion_policy {\n        allow: [\"parse\", \"transform\", \"serialize\"] if data.size < 50_000_000\n        deny: [\"execute_code\", \"file_access\"]\n        require: {\n            supported_formats: [\"json\", \"xml\", \"yaml\", \"csv\"],\n            charset_validation: true\n        }\n        audit: {\n            log_level: \"info\",\n            include_conversion_stats: true\n        }\n    }\n\n    with memory = \"ephemeral\", timeout = 10000 {\n        // Validate formats\n        if !is_supported(from_format) || !is_supported(to_format) {\n            return error(\"Unsupported format\");\n        }\n\n        // Parse source format\n        let parsed = parse(data, from_format);\n\n        // Transform to intermediate representation\n        let transformed = normalize(parsed);\n\n        // Serialize to target format\n        let result = serialize(transformed, to_format);\n\n        return result;\n    }\n}"
      },
      {
        "title": "Pattern 3: API Aggregator",
        "body": "agent api_aggregator(sources: Array<String>) -> AggregatedData {\n    capabilities = [\"parallel_requests\", \"data_normalization\", \"deduplication\"]\n\n    policy aggregation_policy {\n        allow: [\"https_request\"] if url in sources\n        deny: [\"http_request\", \"file_access\"]\n        require: {\n            tls_verification: true,\n            concurrent_limit: 10,\n            timeout_per_request: \"3000ms\",\n            total_timeout: \"15000ms\"\n        }\n        audit: {\n            log_level: \"info\",\n            include_source_latencies: true,\n            alert_on_source_failure: true\n        }\n    }\n\n    with memory = \"ephemeral\", timeout = 15000 {\n        let results = [];\n\n        // Parallel fetch from all sources\n        for source in sources {\n            async {\n                try {\n                    let response = http.get(source, {\n                        timeout: 3000,\n                        verify_tls: true\n                    });\n                    results.push(response.json());\n                } catch (error) {\n                    log(\"WARNING\", \"Source failed: \" + source);\n                }\n            }\n        }\n\n        // Wait for all requests\n        await_all(results);\n\n        // Normalize and deduplicate\n        let normalized = normalize_data(results);\n        let deduplicated = deduplicate(normalized);\n\n        return AggregatedData {\n            sources: sources.length,\n            records: deduplicated.length,\n            data: deduplicated\n        };\n    }\n}"
      },
      {
        "title": "Pattern 4: Security Scanner",
        "body": "agent security_scanner(target: String, scan_type: String) -> ScanReport {\n    capabilities = [\n        \"vulnerability_detection\",\n        \"dependency_analysis\",\n        \"compliance_check\"\n    ]\n\n    policy scanner_policy {\n        allow: [\n            \"read_files\",\n            \"analyze_dependencies\",\n            \"check_vulnerabilities\"\n        ] if scan.depth <= 10\n\n        deny: [\n            \"write_files\",\n            \"execute_code\",\n            \"network_access\"\n        ]\n\n        require: {\n            sandbox_tier: \"Tier2\",  // gVisor isolation\n            cvss_scoring: true,\n            cwe_classification: true\n        }\n\n        audit: {\n            log_level: \"warning\",\n            include_findings: true,\n            include_cvss_scores: true,\n            compliance_tags: [\"OWASP\", \"CWE\"]\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\", sandbox = \"Tier2\" {\n        let findings = [];\n\n        // Scan based on type\n        match scan_type {\n            \"dependencies\" => {\n                findings = scan_dependencies(target);\n            },\n            \"vulnerabilities\" => {\n                findings = scan_vulnerabilities(target);\n            },\n            \"compliance\" => {\n                findings = check_compliance(target, [\"HIPAA\", \"SOC2\"]);\n            },\n            _ => {\n                return error(\"Unknown scan type\");\n            }\n        }\n\n        // Calculate risk score\n        let risk_score = calculate_risk(findings);\n\n        return ScanReport {\n            target: target,\n            scan_type: scan_type,\n            findings_count: findings.length,\n            critical_count: count_by_severity(findings, \"CRITICAL\"),\n            high_count: count_by_severity(findings, \"HIGH\"),\n            risk_score: risk_score,\n            findings: findings,\n            recommendations: 
generate_remediation(findings)\n        };\n    }\n}"
      },
      {
        "title": "Pattern 5: Notification Router",
        "body": "agent notification_router(event: Event, routing_rules: RoutingRules) -> String {\n    capabilities = [\"event_filtering\", \"multi_channel_delivery\", \"retry_logic\"]\n\n    policy notification_policy {\n        allow: [\n            \"send_email\",\n            \"send_slack\",\n            \"send_webhook\"\n        ] if event.priority != \"spam\"\n\n        deny: [\"send_sms\"] if event.priority == \"low\"  // Cost control\n\n        require: {\n            rate_limiting: \"100/minute\",\n            retry_attempts: 3,\n            backoff_strategy: \"exponential\"\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_delivery_status: true,\n            include_retry_count: true\n        }\n    }\n\n    with memory = \"ephemeral\" {\n        // Filter event\n        if !should_notify(event, routing_rules) {\n            return \"Event filtered\";\n        }\n\n        // Determine channels\n        let channels = select_channels(event, routing_rules);\n\n        // Send notifications with retry\n        let results = [];\n        for channel in channels {\n            let success = send_with_retry(channel, event, max_attempts = 3);\n            results.push({channel: channel, success: success});\n        }\n\n        return format_results(results);\n    }\n}"
      },
      {
        "title": "Pattern 6: Workflow Orchestrator",
        "body": "agent workflow_orchestrator(workflow_spec: WorkflowSpec) -> WorkflowResult {\n    capabilities = [\n        \"step_execution\",\n        \"dependency_resolution\",\n        \"failure_recovery\"\n    ]\n\n    policy orchestration_policy {\n        allow: [\n            \"invoke_agent\",\n            \"manage_state\",\n            \"handle_errors\"\n        ] if workflow.depth < 10\n\n        deny: [\n            \"recursive_workflows\",\n            \"unlimited_agents\"\n        ]\n\n        require: {\n            max_concurrent_steps: 20,\n            total_timeout: \"600000ms\",  // 10 minutes\n            checkpoint_enabled: true,\n            circuit_breaker: true\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_workflow_graph: true,\n            include_step_timing: true,\n            include_failure_trace: true\n        }\n    }\n\n    with memory = \"persistent\", timeout = 600000 {\n        let state = WorkflowState.new(workflow_spec);\n\n        try {\n            // Execute workflow steps\n            for step in workflow_spec.steps {\n                // Check dependencies\n                if !dependencies_met(step, state) {\n                    await_dependencies(step, state);\n                }\n\n                // Execute step\n                let result = execute_step(step, state);\n\n                // Update state with checkpoint\n                state.complete_step(step.id, result);\n                checkpoint(state);\n            }\n\n            return WorkflowResult {\n                status: \"completed\",\n                outputs: state.collect_outputs(),\n                execution_time: state.elapsed_time()\n            };\n\n        } catch (error) {\n            // Attempt recovery\n            if can_recover(error, state) {\n                let recovered_state = recover_workflow(state);\n                return resume_workflow(recovered_state);\n            }\n\n            return 
WorkflowResult {\n                status: \"failed\",\n                error: error.message,\n                completed_steps: state.completed_steps(),\n                checkpoint: state.last_checkpoint()\n            };\n        }\n    }\n}"
      },
      {
        "title": "❌ Anti-Pattern 1: Missing Policy Definitions",
        "body": "// BAD: No policies defined\nagent insecure_agent(input: String) -> String {\n    with memory = \"ephemeral\" {\n        return process(input);\n    }\n}\n\n✅ Fix: Always define explicit policies\n\nagent secure_agent(input: String) -> String {\n    policy security_policy {\n        allow: [\"process_data\"] if input.length() < 10000\n        deny: [\"network_access\", \"file_access\"]\n        require: {input_validation: true}\n        audit: {log_level: \"info\"}\n    }\n\n    with memory = \"ephemeral\" {\n        return process(input);\n    }\n}"
      },
      {
        "title": "❌ Anti-Pattern 2: Overly Permissive Policies",
        "body": "// BAD: Allows everything\npolicy bad_policy {\n    allow: \"*\" if true\n}\n\n✅ Fix: Use principle of least privilege\n\npolicy good_policy {\n    // Only allow what's needed\n    allow: [\"read_data\", \"write_output\"] if authorized\n    // Explicitly deny risky operations\n    deny: [\"execute_code\", \"network_access\", \"file_system\"]\n    require: {authentication: true}\n}"
      },
      {
        "title": "❌ Anti-Pattern 3: No Resource Limits",
        "body": "// BAD: No timeout, unlimited memory\nwith memory = \"persistent\" {\n    // Could run forever or consume unlimited memory\n    while true {\n        expensive_operation();\n    }\n}\n\n✅ Fix: Always set resource limits\n\nwith\n    memory = \"ephemeral\",       // Use ephemeral when possible\n    timeout = 30000,            // 30 second timeout\n    max_memory_mb = 512,        // Memory limit\n    max_cpu_cores = 1.0         // CPU limit\n{\n    for item in limited_dataset {\n        process(item);\n    }\n}"
      },
      {
        "title": "❌ Anti-Pattern 4: Logging Sensitive Data",
        "body": "// BAD: Logs passwords and secrets\naudit: {\n    log_level: \"info\",\n    include_input: true,   // Will log passwords!\n    include_output: true\n}\n\n✅ Fix: Never log sensitive data\n\naudit: {\n    log_level: \"info\",\n    include_input: false,      // Protect PII/secrets\n    include_output: false,     // Protect PII/secrets\n    include_metadata: true,    // OK to log metadata\n    include_timing: true       // OK to log performance\n}"
      },
      {
        "title": "❌ Anti-Pattern 5: Hardcoded Secrets",
        "body": "// BAD: API key hardcoded\nlet api_key = \"sk_live_abc123xyz789\";\n\n✅ Fix: Use Vault references\n\n// GOOD: Secret from Vault\nlet api_key = vault://application/api/key;"
      },
      {
        "title": "❌ Anti-Pattern 6: No Input Validation",
        "body": "// BAD: No validation\nagent bad_agent(input: String) -> String {\n    return execute_command(input);  // Command injection risk!\n}\n\n✅ Fix: Always validate input\n\nagent good_agent(input: String) -> String {\n    // Validate input\n    if !is_valid_input(input) {\n        return error(\"Invalid input\");\n    }\n\n    // Sanitize before use\n    let sanitized = sanitize(input);\n    return safe_process(sanitized);\n}"
      },
      {
        "title": "❌ Anti-Pattern 7: Wrong Sandbox Tier",
        "body": "// BAD: Processing untrusted code in Tier1\nagent code_runner(untrusted_code: String) -> String {\n    with sandbox = \"Tier1\" {  // Not enough isolation!\n        return eval(untrusted_code);\n    }\n}\n\n✅ Fix: Use appropriate sandbox tier\n\nagent code_runner(untrusted_code: String) -> String {\n    policy strict_isolation {\n        deny: [\"file_access\", \"network_access\"]\n        require: {sandbox_tier: \"Tier3\"}\n    }\n\n    with sandbox = \"Tier3\" {  // Firecracker microVM\n        return safe_eval(untrusted_code);\n    }\n}"
      },
      {
        "title": "❌ Anti-Pattern 8: No Error Handling",
        "body": "// BAD: Unhandled errors will crash agent\nagent fragile_agent(url: String) -> String {\n    let response = http.get(url);  // What if this fails?\n    return response.body;\n}\n\n✅ Fix: Always handle errors\n\nagent robust_agent(url: String) -> String {\n    try {\n        let response = http.get(url, timeout = 5000);\n\n        if response.status != 200 {\n            return error(\"HTTP error: \" + response.status);\n        }\n\n        return response.body;\n\n    } catch (error) {\n        log(\"ERROR\", \"Request failed: \" + error.message);\n        return error(\"Request failed\");\n    }\n}"
      },
      {
        "title": "Validation Checklist",
        "body": "Before deploying an agent, verify:"
      },
      {
        "title": "Security Checklist",
        "body": "Policies defined for all operations\n Principle of least privilege applied (deny by default)\n Sandbox tier appropriate for workload\n Secrets referenced via Vault (never hardcoded)\n Input validation present for all user inputs\n Output sanitization prevents injection attacks\n No sensitive data in audit logs"
      },
      {
        "title": "Resource Management",
        "body": "Timeout configured appropriately\n Memory limits set based on workload\n CPU limits defined\n Concurrency limits for agent invocations\n Rate limiting configured for external calls"
      },
      {
        "title": "Error Handling",
        "body": "Try/catch blocks around risky operations\n Error messages are informative but not leaking secrets\n Retry logic for transient failures\n Circuit breakers for cascading failures\n Graceful degradation when dependencies fail"
      },
      {
        "title": "Compliance & Audit",
        "body": "Audit logging configured\n Compliance tags added (HIPAA, SOC2, GDPR as needed)\n Retention policies set appropriately\n PII handling follows regulations\n Data encryption at rest and in transit"
      },
      {
        "title": "Testing",
        "body": "Unit tests for core functions\n Integration tests with dependencies\n Security tests (injection, overflow, etc.)\n Performance tests (within resource limits)\n Chaos tests (failure scenarios)"
      },
      {
        "title": "Built-in Functions",
        "body": "CategoryFunctionPurposeStringlen(s)String lengthto_upper(s)Convert to uppercaseto_lower(s)Convert to lowercasetrim(s)Remove whitespacesplit(s, delim)Split stringcontains(s, substr)Check substringJSONjson.parse(s)Parse JSON stringjson.stringify(obj)Convert to JSONjson.validate(s, schema)Validate against schemaHTTPhttp.get(url, opts)HTTP GET requesthttp.post(url, body, opts)HTTP POST requestverify_hmac_sha256(data, secret, sig)Verify HMAC signatureCryptoencrypt(data)AES-256-GCM encryptdecrypt(data)AES-256-GCM decrypthash_sha256(data)SHA-256 hashsign(data)Ed25519 signatureTimenow()Current timestampsleep(ms)Sleep for millisecondsformat_time(ts, fmt)Format timestampLogginglog(level, msg)Log messagedebug(msg)Debug loginfo(msg)Info logwarn(msg)Warning logerror(msg)Error logValidationis_valid_email(s)Email validationis_valid_url(s)URL validationis_valid_json(s)JSON validationArrayspush(arr, item)Add to arraypop(arr)Remove from arraymap(arr, fn)Map functionfilter(arr, fn)Filter arrayreduce(arr, fn, init)Reduce array"
      },
      {
        "title": "Resource Limit Recommendations",
        "body": "Workload TypeMemoryCPUTimeoutSandboxData Validation256MB0.55sTier1Format Conversion512MB1.010sTier1API Integration512MB1.015sTier1Code Analysis1GB2.030sTier2Security Scan2GB2.060sTier2ML Inference4GB4.0120sTier2Workflow Orchestration1GB1.0600sTier1Untrusted Code512MB1.010sTier3"
      },
      {
        "title": "Common Error Codes",
        "body": "CodeMeaningResolutionPOLICY_VIOLATIONOperation denied by policyCheck policy allow/deny rulesRESOURCE_EXCEEDEDResource limit reachedIncrease limits or optimize codeTIMEOUTExecution timeoutIncrease timeout or optimizeAUTH_FAILEDAuthentication failedCheck Vault credentialsSIGNATURE_INVALIDCrypto signature invalidVerify tool signatureSANDBOX_ERRORSandbox isolation failedCheck sandbox tier compatibilityVALIDATION_ERRORInput validation failedFix input data formatNETWORK_ERRORNetwork request failedCheck endpoint and connectivity"
      },
      {
        "title": "Documentation Links",
        "body": "Full DSL Guide: docs/dsl-guide.md\nDSL Specification: docs/dsl-specification.md\nReasoning Loop Guide: docs/reasoning-loop.md\nExample Agents: agents/README.md (8 production examples)\nRuntime Architecture: docs/runtime-architecture.md\nAPI Reference: docs/api-reference.md\nSecurity Model: docs/security-model.md\nGetting Started: docs/getting-started.md"
      },
      {
        "title": "Pro Tips for AI Assistants",
        "body": "Always Start with Security: Design policies before implementation\nUse Example Agents: Adapt from the 8 production agents in /agents/\nValidate Early: Add input validation at the beginning\nHandle Errors Gracefully: Wrap risky operations in try/catch\nLog Thoughtfully: Audit what matters, never log secrets\nChoose Right Sandbox: Match tier to threat model\nTest Incrementally: Start simple, add features with tests\nDocument Assumptions: Comment complex policy logic\nMonitor Resources: Set realistic limits based on workload\nReview Before Deploy: Use the validation checklist\n\nEnd of SKILLS.md\n\nThis guide prioritizes security, compliance, and best practices for building production-grade Symbiont agents."
      }
    ],
    "body": "Symbiont Agent Development Skills Guide\n\nPurpose: This guide helps AI assistants quickly build secure, compliant Symbiont agents following best practices.\n\nFor Full Documentation: See DSL Guide, DSL Specification, and Example Agents\n\nWhat Makes Symbiont Unique\nORGA Reasoning Loop: Typestate-enforced Observe-Reason-Gate-Act cycle with compile-time phase safety\nCedar Policy Authorization: Formal policy evaluation via cedar-policy crate's Authorizer::is_authorized()\nKnowledge Bridge: Context-aware reasoning with vector-backed retrieval and automatic learning persistence\nDurable Journal: All 7 loop event types recorded for crash recovery and replay without re-calling the LLM\nZero-Trust Security: All inputs untrusted by default, explicit policies required\nPolicy-as-Code: Declarative security rules enforced at runtime\nMulti-Tier Sandboxing: Docker → gVisor → Firecracker isolation\nEnterprise Compliance: HIPAA, SOC2, GDPR patterns built-in\nCryptographic Verification: SchemaPin for MCP tools, AgentPin for agent identity, Ed25519 signatures\nWebhook DX: Signature verification middleware with GitHub/Stripe/Slack presets\nPersistent Memory: Markdown-backed agent memory with retention and compaction\nQuick Start Template\nMinimal Viable Agent\nmetadata {\n    version = \"1.0.0\"\n    author = \"Your Name\"\n    description = \"Brief description of what this agent does\"\n    tags = [\"category\", \"use-case\"]\n}\n\nagent my_agent(input: String) -> String {\n    capabilities = [\"process_data\", \"validate_input\"]\n\n    policy security_policy {\n        // Allow specific operations\n        allow: [\"read_input\", \"write_output\"] if input.length() < 10000\n\n        // Deny dangerous operations\n        deny: [\"network_access\", \"file_system\"] if true\n\n        // Require conditions\n        require: {\n            input_validation: true,\n            output_sanitization: true\n        }\n\n        // Audit important actions\n        audit: 
{\n            log_level: \"info\",\n            include_input: false,  // Don't log sensitive data\n            include_output: true\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\", timeout = 30000 {\n        try {\n            // Validate input\n            if input.is_empty() {\n                return error(\"Input cannot be empty\");\n            }\n\n            // Process data\n            let result = process(input);\n\n            // Return result\n            return result;\n\n        } catch (error) {\n            log(\"ERROR\", \"Processing failed: \" + error.message);\n            return error(\"Processing failed\");\n        }\n    }\n}\n\nfn process(data: String) -> String {\n    // Your processing logic here\n    return data.to_uppercase();\n}\n\nAgentic Reasoning Loop (ORGA Cycle)\n\nThe reasoning loop drives autonomous agent behavior through a typestate-enforced cycle:\n\nObserve — Collect results from previous tool executions\nReason — LLM produces proposed actions (tool calls or text responses)\nGate — Policy engine evaluates each proposed action\nAct — Approved actions are dispatched to tool executors\nMinimal Reasoning Loop\nuse std::sync::Arc;\nuse symbi_runtime::reasoning::{\n    ReasoningLoopRunner, LoopConfig, Conversation, ConversationMessage,\n    circuit_breaker::CircuitBreakerRegistry,\n    context_manager::DefaultContextManager,\n    executor::DefaultActionExecutor,\n    loop_types::BufferedJournal,\n    policy_bridge::DefaultPolicyGate,\n};\nuse symbi_runtime::types::AgentId;\n\nlet runner = ReasoningLoopRunner {\n    provider: Arc::new(my_inference_provider),\n    policy_gate: Arc::new(DefaultPolicyGate::permissive()),\n    executor: Arc::new(DefaultActionExecutor::default()),\n    context_manager: Arc::new(DefaultContextManager::default()),\n    circuit_breakers: Arc::new(CircuitBreakerRegistry::default()),\n    journal: Arc::new(BufferedJournal::new(1000)),\n    knowledge_bridge: None, // Optional: add 
KnowledgeBridge for RAG\n};\n\nlet mut conv = Conversation::with_system(\"You are a helpful agent.\");\nconv.push(ConversationMessage::user(\"What is 6 * 7?\"));\n\nlet result = runner\n    .run(AgentId::new(), conv, LoopConfig::default())\n    .await;\n\nPhase Transitions (Compile-Time Safe)\n\nInvalid transitions are caught at compile time:\n\nReasoning → PolicyCheck → ToolDispatching → Observing → Reasoning (loop)\n                                                      → Complete (exit)\n\nJournal Events\n\nThe journal records all 7 event types for durable execution:\n\nEvent\tWhen\tPurpose\nStarted\tLoop begin\tConfiguration snapshot\nReasoningComplete\tAfter LLM response, before policy\tCrash recovery without re-calling LLM\nPolicyEvaluated\tAfter policy check\tAction counts, denied counts\nToolsDispatched\tAfter tool execution\tTool count, wall-clock duration\nObservationsCollected\tAfter collecting results\tObservation count\nTerminated\tLoop end\tReason, iterations, usage, duration\nRecoveryTriggered\tOn tool failure recovery\tStrategy, error context\nCedar Policy Gate (Feature: cedar)\n\nFormal authorization using the cedar-policy crate:\n\nuse symbi_runtime::reasoning::cedar_gate::{CedarPolicyGate, CedarPolicy};\n\nlet gate = CedarPolicyGate::deny_by_default();\n\n// Cedar policies use entity types: Agent (principal), Action (action), Resource (resource)\ngate.add_policy(CedarPolicy {\n    name: \"allow_respond\".into(),\n    source: r#\"permit(principal, action == Action::\"respond\", resource);\"#.into(),\n    active: true,\n}).await;\n\ngate.add_policy(CedarPolicy {\n    name: \"deny_search\".into(),\n    source: r#\"forbid(principal, action == Action::\"tool_call::search\", resource);\"#.into(),\n    active: true,\n}).await;\n\n\nAction mapping: tool_call::<name>, respond, delegate::<target>, terminate.\n\nCedar semantics enforced: forbid overrides permit, default deny, skip-on-error.\n\nKnowledge Bridge (Optional)\n\nAdd context-aware reasoning with 
vector-backed retrieval:\n\nuse symbi_runtime::reasoning::KnowledgeBridge;\n\nlet bridge = Arc::new(KnowledgeBridge::new(knowledge_config));\n\nlet runner = ReasoningLoopRunner {\n    // ... other fields ...\n    knowledge_bridge: Some(bridge),\n};\n\n\nThe bridge injects relevant context before each reasoning step and persists learnings after loop completion.\n\nSecurity-First Policy Patterns\n1. Data Processing Agent (Read/Transform/Write)\npolicy data_processing_policy {\n    // Allow data operations with size limits\n    allow: [\n        \"read_data\",\n        \"transform_data\",\n        \"write_output\"\n    ] if request.data_size < 10_000_000  // 10MB limit\n\n    // Deny dangerous operations\n    deny: [\n        \"execute_code\",\n        \"spawn_process\",\n        \"network_access\"\n    ] if true\n\n    // Require validation\n    require: {\n        input_validation: true,\n        output_sanitization: true,\n        rate_limiting: \"100/minute\"\n    }\n\n    // Audit with PII protection\n    audit: {\n        log_level: \"info\",\n        include_input: false,      // Protect PII\n        include_output: false,     // Protect PII\n        include_metadata: true,\n        retention_days: 90\n    }\n}\n\n2. 
API Integration Agent (External Calls)\npolicy api_integration_policy {\n    // Allow specific endpoints only\n    allow: [\n        \"https_request\"\n    ] if request.url.starts_with(\"https://api.trusted-service.com/\")\n\n    // Deny everything else\n    deny: [\n        \"http_request\",           // Only HTTPS\n        \"file_access\",\n        \"database_access\"\n    ] if true\n\n    // Require security measures\n    require: {\n        tls_verification: true,\n        api_key_rotation: \"30_days\",\n        rate_limiting: \"1000/hour\",\n        timeout: \"5000ms\"\n    }\n\n    // Audit all API calls\n    audit: {\n        log_level: \"info\",\n        include_request_headers: true,\n        include_response_status: true,\n        include_latency: true,\n        alert_on_errors: true\n    }\n}\n\n3. Security Scanning Agent (Audit/Compliance)\npolicy security_scanner_policy {\n    // Allow read-only scanning\n    allow: [\n        \"read_files\",\n        \"analyze_code\",\n        \"check_dependencies\",\n        \"validate_configs\"\n    ] if scan.depth <= 5  // Limit recursion\n\n    // Deny modifications\n    deny: [\n        \"write_files\",\n        \"modify_permissions\",\n        \"execute_code\"\n    ] if true\n\n    // Require strict controls\n    require: {\n        signature_verification: true,\n        checksum_validation: true,\n        sandbox_tier: \"Tier2\",  // gVisor isolation\n        max_scan_time: \"300000ms\"  // 5 minutes\n    }\n\n    // Audit findings\n    audit: {\n        log_level: \"warning\",\n        include_findings: true,\n        include_risk_scores: true,\n        alert_on_critical: true,\n        compliance_tags: [\"HIPAA\", \"SOC2\"]\n    }\n}\n\n4. 
Workflow Orchestration Agent (Multi-Step)\npolicy orchestration_policy {\n    // Allow agent coordination\n    allow: [\n        \"invoke_agent\",\n        \"message_passing\",\n        \"state_management\"\n    ] if orchestration.depth < 10  // Prevent infinite loops\n\n    // Deny resource-intensive ops\n    deny: [\n        \"spawn_unlimited_agents\",\n        \"recursive_orchestration\"\n    ] if true\n\n    // Require controls\n    require: {\n        max_concurrent_agents: 50,\n        total_timeout: \"600000ms\",  // 10 minutes\n        failure_recovery: \"retry_with_backoff\",\n        circuit_breaker: true\n    }\n\n    // Audit workflow\n    audit: {\n        log_level: \"info\",\n        include_workflow_graph: true,\n        include_timing: true,\n        include_dependencies: true,\n        trace_id: true\n    }\n}\n\nSandbox Tier Selection Guide\nTier\tTechnology\tUse Case\tPerformance\tSecurity\tOverhead\nTier1\tDocker\tGeneral workloads\tFast\tGood\tLow (~100ms)\nTier2\tgVisor\tUntrusted code\tMedium\tHigh\tMedium (~500ms)\nTier3\tFirecracker\tMulti-tenant isolation\tSlower\tMaximum\tHigh (~2s)\nNative\tProcess only\tDevelopment ONLY\tFastest\tNone\tMinimal\n\nSelection Guide:\n\nTier1 (Docker): Default choice for most agents\nTier2 (gVisor): Processing external data, user-provided code\nTier3 (Firecracker): Highly sensitive, regulatory compliance\nNative: NEVER use in production (development/testing only)\nDSL Syntax Cheatsheet\nType System\n// Primitives\nlet name: String = \"value\";\nlet count: Integer = 42;\nlet price: Float = 19.99;\nlet active: Boolean = true;\nlet data: Bytes = [0x01, 0x02, 0x03];\n\n// Collections\nlet tags: Array<String> = [\"tag1\", \"tag2\"];\nlet config: Map<String, String> = {\"key\": \"value\"};\nlet unique: Set<Integer> = {1, 2, 3};\n\n// Security-Aware Types\nlet sensitive: EncryptedData<String> = encrypt(\"secret\");\nlet private_data: PrivateData<Integer> = private(123);\nlet verified: VerifiableResult<String> = 
sign(\"data\");\n\n// Optional Types\nlet optional: Optional<String> = Some(\"value\");\nlet none_value: Optional<String> = None;\n\nControl Flow\n// If/Else\nif condition {\n    // true branch\n} else if other_condition {\n    // else if branch\n} else {\n    // false branch\n}\n\n// Match (Pattern Matching)\nmatch value {\n    Some(x) => process(x),\n    None => default_value,\n    Error(e) => handle_error(e)\n}\n\n// Loops\nfor item in collection {\n    process(item);\n}\n\nwhile condition {\n    do_work();\n}\n\n// Error Handling\ntry {\n    risky_operation();\n} catch (error) {\n    log(\"ERROR\", error.message);\n    return error(\"Operation failed\");\n}\n\nPolicy Language\npolicy policy_name {\n    // Allow operations with conditions\n    allow: [\"operation1\", \"operation2\"] if condition\n    allow: \"operation3\" if complex.condition && other.check\n\n    // Deny operations\n    deny: [\"dangerous_op\"] if true\n    deny: \"risky_op\" if environment == \"production\"\n\n    // Required conditions\n    require: {\n        authentication: true,\n        authorization: \"role:admin\",\n        encryption: \"AES-256-GCM\",\n        rate_limit: \"100/second\"\n    }\n\n    // Audit specification\n    audit: {\n        log_level: \"info\" | \"warning\" | \"error\",\n        include_input: boolean,\n        include_output: boolean,\n        retention_days: integer,\n        compliance_tags: array<string>\n    }\n}\n\nWith Block (Execution Context)\nwith\n    memory = \"ephemeral\" | \"persistent\",\n    privacy = \"strict\" | \"medium\" | \"low\",\n    security = \"high\" | \"medium\" | \"low\",\n    sandbox = \"Tier1\" | \"Tier2\" | \"Tier3\",\n    timeout = milliseconds,\n    requires = [\"clearance:level5\", \"approval:manager\"]\n{\n    // Agent implementation\n}\n\nIntegration Patterns\n1. 
Secrets Management (Vault/OpenBao)\nagent secure_api_caller(endpoint: String) -> String {\n    policy secret_access {\n        allow: [\"read_secret\"] if secret.path.starts_with(\"application/\")\n        deny: [\"write_secret\", \"delete_secret\"] if true\n\n        require: {\n            vault_auth: true,\n            token_rotation: \"1_hour\"\n        }\n\n        audit: {\n            log_level: \"warning\",\n            include_secret_path: true,\n            include_secret_value: false  // NEVER log secrets\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\" {\n        // Reference secrets using vault:// protocol\n        let api_key = vault://application/api/key;\n        let api_secret = vault://application/api/secret;\n\n        // Use secrets in API call\n        let response = http.post(endpoint, {\n            headers: {\n                \"Authorization\": \"Bearer \" + api_key,\n                \"X-API-Secret\": api_secret\n            }\n        });\n\n        return response.body;\n    }\n}\n\n2. 
MCP Tool Integration (Cryptographic Verification)\nagent mcp_tool_user(tool_name: String, input: String) -> String {\n    capabilities = [\"invoke_mcp_tool\"]\n\n    policy mcp_security {\n        // Only allow verified tools\n        allow: [\"mcp_invoke\"] if tool.verified == true\n        deny: [\"mcp_invoke\"] if tool.signature_invalid\n\n        require: {\n            schema_pin_verification: true,  // ECDSA P-256\n            tofu_trust_model: true,         // Trust-On-First-Use\n            tool_review_required: false     // Auto for signed tools\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_tool_signature: true,\n            include_tool_schema: true\n        }\n    }\n\n    with security = \"high\" {\n        // Discover and invoke MCP tool\n        let tool = mcp.discover(tool_name);\n\n        // Verify cryptographic signature\n        if !tool.verify_signature() {\n            return error(\"Tool signature verification failed\");\n        }\n\n        // Invoke tool\n        let result = mcp.invoke(tool, input);\n        return result;\n    }\n}\n\n3. 
HTTP Webhook Processing\nagent webhook_processor(request: HttpRequest) -> HttpResponse {\n    capabilities = [\"process_webhook\", \"validate_signature\"]\n\n    policy webhook_policy {\n        allow: [\"parse_json\", \"validate_data\"] if request.size < 1_000_000\n        deny: [\"execute_code\", \"file_access\"] if true\n\n        require: {\n            signature_verification: true,\n            rate_limiting: \"1000/minute\",\n            timeout: \"5000ms\"\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_request_id: true,\n            include_source_ip: true,\n            alert_on_invalid_signature: true\n        }\n    }\n\n    with memory = \"ephemeral\", timeout = 5000 {\n        // Verify webhook signature (e.g., GitHub, Stripe)\n        let signature = request.headers[\"X-Webhook-Signature\"];\n        let secret = vault://webhooks/secret;\n\n        if !verify_hmac_sha256(request.body, secret, signature) {\n            return HttpResponse(401, \"Invalid signature\");\n        }\n\n        // Parse and process webhook\n        let data = json.parse(request.body);\n        let result = process_event(data);\n\n        return HttpResponse(200, json.stringify(result));\n    }\n}\n\n4. 
Scheduled Execution\nmetadata {\n    schedule = \"0 */6 * * *\"  // Every 6 hours (cron format)\n}\n\nagent scheduled_cleanup() -> String {\n    capabilities = [\"cleanup_data\", \"archival\"]\n\n    policy cleanup_policy {\n        allow: [\"read_old_data\", \"archive\", \"delete\"] if data.age > 90_days\n        deny: [\"delete\"] if data.age <= 90_days\n\n        require: {\n            backup_verification: true,\n            retention_check: true\n        }\n\n        audit: {\n            log_level: \"warning\",\n            include_deleted_count: true,\n            include_archived_count: true\n        }\n    }\n\n    with memory = \"persistent\", timeout = 300000 {\n        let old_data = query_old_data(90);\n        let archived_count = archive_data(old_data);\n        let deleted_count = delete_archived_data(old_data);\n\n        return \"Archived: \" + archived_count + \", Deleted: \" + deleted_count;\n    }\n}\n\n5. Persistent Memory (DSL Configuration)\n// Top-level memory block — configures Markdown-backed agent memory\nmemory agent_memory {\n    store    markdown           // Storage backend (markdown only for now)\n    path     \"data/agents\"     // Root directory for memory files\n    retention 90d              // How long daily logs are kept\n    search {\n        vector_weight  0.7     // Semantic similarity weight\n        keyword_weight 0.3     // BM25 keyword match weight\n    }\n}\n\n\nMemory files are human-readable Markdown stored at data/agents/{agent_id}/memory.md with sections for Facts, Procedures, and Learned Patterns. Daily interaction logs are appended to logs/{date}.md and compacted based on retention policy.\n\nREPL Commands:\n\n:memory inspect <agent-id> — Display agent's memory.md\n:memory compact <agent-id> — Flush daily logs, remove expired entries\n:memory purge <agent-id> — Delete all memory for an agent\n6. 
Webhook Endpoints (DSL Configuration)\n// Top-level webhook block — defines verified webhook endpoints\nwebhook github_events {\n    path     \"/hooks/github\"\n    provider github                              // Preset: github, stripe, slack, custom\n    secret   \"secret://vault/github-webhook-secret\"  // HMAC secret (supports vault refs)\n    agent    code_review_agent                   // Route to this agent\n    filter {\n        json_path \"$.action\"\n        equals    \"opened\"                       // Only process \"opened\" events\n    }\n}\n\n\nProvider presets configure signature verification automatically:\n\ngithub: X-Hub-Signature-256 header, sha256= prefix, HMAC-SHA256\nstripe: Stripe-Signature header, HMAC-SHA256\nslack: X-Slack-Signature header, v0= prefix, HMAC-SHA256\ncustom: X-Signature header, HMAC-SHA256\n\nAll signatures are verified using constant-time comparison before the request reaches the agent handler. Invalid signatures return HTTP 401.\n\nREPL Commands:\n\n:webhook list — Show configured webhook definitions\n7. 
Persistent Memory & RAG Engine\nagent knowledge_assistant(query: String) -> String {\n    capabilities = [\"semantic_search\", \"rag_retrieval\", \"synthesis\"]\n\n    policy knowledge_policy {\n        allow: [\n            \"vector_search\",\n            \"knowledge_retrieval\",\n            \"context_synthesis\"\n        ] if query.length() < 1000\n\n        deny: [\"knowledge_modification\"] if true\n\n        require: {\n            embedding_model: \"all-MiniLM-L6-v2\",\n            similarity_threshold: 0.7,\n            max_results: 10\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_query: true,\n            include_relevance_scores: true\n        }\n    }\n\n    with memory = \"persistent\", security = \"medium\" {\n        // Semantic search in vector database\n        let context = rag.search(query, {\n            top_k: 5,\n            similarity_threshold: 0.7\n        });\n\n        // Synthesize response\n        let response = synthesize(query, context);\n\n        // Store interaction for future learning\n        memory.store({\n            query: query,\n            response: response,\n            timestamp: now()\n        });\n\n        return response;\n    }\n}\n\n8. 
Inter-Agent Communication\nagent coordinator(task: String) -> String {\n    capabilities = [\"message_passing\", \"agent_coordination\"]\n\n    policy coordination_policy {\n        allow: [\n            \"send_message\",\n            \"receive_message\",\n            \"invoke_agent\"\n        ] if coordination.depth < 5\n\n        deny: [\"broadcast\"] if true  // Prevent message storms\n\n        require: {\n            message_encryption: true,  // AES-256-GCM\n            message_signing: true,     // Ed25519\n            max_concurrent_agents: 10\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_message_flow: true,\n            include_agent_graph: true\n        }\n    }\n\n    with memory = \"persistent\" {\n        // Invoke specialized agent\n        let validator_response = agent.invoke(\"data_validator\", {\n            data: task\n        });\n\n        // Send encrypted message to another agent\n        agent.send_message(\"processor_agent\", {\n            type: \"process_request\",\n            payload: validator_response,\n            priority: \"high\"\n        });\n\n        // Wait for response\n        let result = agent.receive_message(timeout = 10000);\n\n        return result.payload;\n    }\n}\n\nCommon Agent Patterns\nPattern 1: Data Validation Pipeline\nagent data_validator(data: String, schema: String) -> ValidationResult {\n    capabilities = [\"schema_validation\", \"data_quality_check\"]\n\n    policy validation_policy {\n        allow: [\"parse_schema\", \"validate_data\", \"quality_scoring\"]\n        deny: [\"modify_data\", \"execute_code\"]\n        require: {\n            max_data_size: \"10MB\",\n            timeout: \"5000ms\"\n        }\n        audit: {\n            log_level: \"warning\",\n            include_validation_errors: true\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\" {\n        try {\n            // Parse schema\n            let parsed_schema = 
json.parse(schema);\n\n            // Validate against schema\n            let validation = validate(data, parsed_schema);\n\n            // Calculate quality score\n            let quality_score = calculate_quality(data);\n\n            return ValidationResult {\n                valid: validation.success,\n                errors: validation.errors,\n                quality_score: quality_score,\n                recommendations: generate_recommendations(validation)\n            };\n\n        } catch (error) {\n            return ValidationResult {\n                valid: false,\n                errors: [error.message],\n                quality_score: 0.0,\n                recommendations: []\n            };\n        }\n    }\n}\n\nPattern 2: Format Converter\nagent format_converter(data: String, from_format: String, to_format: String) -> String {\n    capabilities = [\"parse_format\", \"transform_data\", \"serialize_format\"]\n\n    policy conversion_policy {\n        allow: [\"parse\", \"transform\", \"serialize\"] if data.size < 50_000_000\n        deny: [\"execute_code\", \"file_access\"]\n        require: {\n            supported_formats: [\"json\", \"xml\", \"yaml\", \"csv\"],\n            charset_validation: true\n        }\n        audit: {\n            log_level: \"info\",\n            include_conversion_stats: true\n        }\n    }\n\n    with memory = \"ephemeral\", timeout = 10000 {\n        // Validate formats\n        if !is_supported(from_format) || !is_supported(to_format) {\n            return error(\"Unsupported format\");\n        }\n\n        // Parse source format\n        let parsed = parse(data, from_format);\n\n        // Transform to intermediate representation\n        let transformed = normalize(parsed);\n\n        // Serialize to target format\n        let result = serialize(transformed, to_format);\n\n        return result;\n    }\n}\n\nPattern 3: API Aggregator\nagent api_aggregator(sources: Array<String>) -> AggregatedData {\n    
capabilities = [\"parallel_requests\", \"data_normalization\", \"deduplication\"]\n\n    policy aggregation_policy {\n        allow: [\"https_request\"] if url in sources\n        deny: [\"http_request\", \"file_access\"]\n        require: {\n            tls_verification: true,\n            concurrent_limit: 10,\n            timeout_per_request: \"3000ms\",\n            total_timeout: \"15000ms\"\n        }\n        audit: {\n            log_level: \"info\",\n            include_source_latencies: true,\n            alert_on_source_failure: true\n        }\n    }\n\n    with memory = \"ephemeral\", timeout = 15000 {\n        let results = [];\n\n        // Parallel fetch from all sources\n        for source in sources {\n            async {\n                try {\n                    let response = http.get(source, {\n                        timeout: 3000,\n                        verify_tls: true\n                    });\n                    results.push(response.json());\n                } catch (error) {\n                    log(\"WARNING\", \"Source failed: \" + source);\n                }\n            }\n        }\n\n        // Wait for all requests\n        await_all(results);\n\n        // Normalize and deduplicate\n        let normalized = normalize_data(results);\n        let deduplicated = deduplicate(normalized);\n\n        return AggregatedData {\n            sources: sources.length,\n            records: deduplicated.length,\n            data: deduplicated\n        };\n    }\n}\n\nPattern 4: Security Scanner\nagent security_scanner(target: String, scan_type: String) -> ScanReport {\n    capabilities = [\n        \"vulnerability_detection\",\n        \"dependency_analysis\",\n        \"compliance_check\"\n    ]\n\n    policy scanner_policy {\n        allow: [\n            \"read_files\",\n            \"analyze_dependencies\",\n            \"check_vulnerabilities\"\n        ] if scan.depth <= 10\n\n        deny: [\n            \"write_files\",\n           
 \"execute_code\",\n            \"network_access\"\n        ]\n\n        require: {\n            sandbox_tier: \"Tier2\",  // gVisor isolation\n            cvss_scoring: true,\n            cwe_classification: true\n        }\n\n        audit: {\n            log_level: \"warning\",\n            include_findings: true,\n            include_cvss_scores: true,\n            compliance_tags: [\"OWASP\", \"CWE\"]\n        }\n    }\n\n    with memory = \"ephemeral\", security = \"high\", sandbox = \"Tier2\" {\n        let findings = [];\n\n        // Scan based on type\n        match scan_type {\n            \"dependencies\" => {\n                findings = scan_dependencies(target);\n            },\n            \"vulnerabilities\" => {\n                findings = scan_vulnerabilities(target);\n            },\n            \"compliance\" => {\n                findings = check_compliance(target, [\"HIPAA\", \"SOC2\"]);\n            },\n            _ => {\n                return error(\"Unknown scan type\");\n            }\n        }\n\n        // Calculate risk score\n        let risk_score = calculate_risk(findings);\n\n        return ScanReport {\n            target: target,\n            scan_type: scan_type,\n            findings_count: findings.length,\n            critical_count: count_by_severity(findings, \"CRITICAL\"),\n            high_count: count_by_severity(findings, \"HIGH\"),\n            risk_score: risk_score,\n            findings: findings,\n            recommendations: generate_remediation(findings)\n        };\n    }\n}\n\nPattern 5: Notification Router\nagent notification_router(event: Event, routing_rules: RoutingRules) -> String {\n    capabilities = [\"event_filtering\", \"multi_channel_delivery\", \"retry_logic\"]\n\n    policy notification_policy {\n        allow: [\n            \"send_email\",\n            \"send_slack\",\n            \"send_webhook\"\n        ] if event.priority != \"spam\"\n\n        deny: [\"send_sms\"] if event.priority == 
\"low\"  // Cost control\n\n        require: {\n            rate_limiting: \"100/minute\",\n            retry_attempts: 3,\n            backoff_strategy: \"exponential\"\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_delivery_status: true,\n            include_retry_count: true\n        }\n    }\n\n    with memory = \"ephemeral\" {\n        // Filter event\n        if !should_notify(event, routing_rules) {\n            return \"Event filtered\";\n        }\n\n        // Determine channels\n        let channels = select_channels(event, routing_rules);\n\n        // Send notifications with retry\n        let results = [];\n        for channel in channels {\n            let success = send_with_retry(channel, event, max_attempts = 3);\n            results.push({channel: channel, success: success});\n        }\n\n        return format_results(results);\n    }\n}\n\nPattern 6: Workflow Orchestrator\nagent workflow_orchestrator(workflow_spec: WorkflowSpec) -> WorkflowResult {\n    capabilities = [\n        \"step_execution\",\n        \"dependency_resolution\",\n        \"failure_recovery\"\n    ]\n\n    policy orchestration_policy {\n        allow: [\n            \"invoke_agent\",\n            \"manage_state\",\n            \"handle_errors\"\n        ] if workflow.depth < 10\n\n        deny: [\n            \"recursive_workflows\",\n            \"unlimited_agents\"\n        ]\n\n        require: {\n            max_concurrent_steps: 20,\n            total_timeout: \"600000ms\",  // 10 minutes\n            checkpoint_enabled: true,\n            circuit_breaker: true\n        }\n\n        audit: {\n            log_level: \"info\",\n            include_workflow_graph: true,\n            include_step_timing: true,\n            include_failure_trace: true\n        }\n    }\n\n    with memory = \"persistent\", timeout = 600000 {\n        let state = WorkflowState.new(workflow_spec);\n\n        try {\n            // Execute workflow steps\n  
          for step in workflow_spec.steps {\n                // Check dependencies\n                if !dependencies_met(step, state) {\n                    await_dependencies(step, state);\n                }\n\n                // Execute step\n                let result = execute_step(step, state);\n\n                // Update state with checkpoint\n                state.complete_step(step.id, result);\n                checkpoint(state);\n            }\n\n            return WorkflowResult {\n                status: \"completed\",\n                outputs: state.collect_outputs(),\n                execution_time: state.elapsed_time()\n            };\n\n        } catch (error) {\n            // Attempt recovery\n            if can_recover(error, state) {\n                let recovered_state = recover_workflow(state);\n                return resume_workflow(recovered_state);\n            }\n\n            return WorkflowResult {\n                status: \"failed\",\n                error: error.message,\n                completed_steps: state.completed_steps(),\n                checkpoint: state.last_checkpoint()\n            };\n        }\n    }\n}\n\nSecurity Anti-Patterns to Avoid\n❌ Anti-Pattern 1: Missing Policy Definitions\n// BAD: No policies defined\nagent insecure_agent(input: String) -> String {\n    with memory = \"ephemeral\" {\n        return process(input);\n    }\n}\n\n\n✅ Fix: Always define explicit policies\n\nagent secure_agent(input: String) -> String {\n    policy security_policy {\n        allow: [\"process_data\"] if input.length() < 10000\n        deny: [\"network_access\", \"file_access\"]\n        require: {input_validation: true}\n        audit: {log_level: \"info\"}\n    }\n\n    with memory = \"ephemeral\" {\n        return process(input);\n    }\n}\n\n❌ Anti-Pattern 2: Overly Permissive Policies\n// BAD: Allows everything\npolicy bad_policy {\n    allow: \"*\" if true\n}\n\n\n✅ Fix: Use principle of least privilege\n\npolicy good_policy 
{\n    // Only allow what's needed\n    allow: [\"read_data\", \"write_output\"] if authorized\n    // Explicitly deny risky operations\n    deny: [\"execute_code\", \"network_access\", \"file_system\"]\n    require: {authentication: true}\n}\n\n❌ Anti-Pattern 3: No Resource Limits\n// BAD: No timeout, unlimited memory\nwith memory = \"persistent\" {\n    // Could run forever or consume unlimited memory\n    while true {\n        expensive_operation();\n    }\n}\n\n\n✅ Fix: Always set resource limits\n\nwith\n    memory = \"ephemeral\",       // Use ephemeral when possible\n    timeout = 30000,            // 30 second timeout\n    max_memory_mb = 512,        // Memory limit\n    max_cpu_cores = 1.0         // CPU limit\n{\n    for item in limited_dataset {\n        process(item);\n    }\n}\n\n❌ Anti-Pattern 4: Logging Sensitive Data\n// BAD: Logs passwords and secrets\naudit: {\n    log_level: \"info\",\n    include_input: true,   // Will log passwords!\n    include_output: true\n}\n\n\n✅ Fix: Never log sensitive data\n\naudit: {\n    log_level: \"info\",\n    include_input: false,      // Protect PII/secrets\n    include_output: false,     // Protect PII/secrets\n    include_metadata: true,    // OK to log metadata\n    include_timing: true       // OK to log performance\n}\n\n❌ Anti-Pattern 5: Hardcoded Secrets\n// BAD: API key hardcoded\nlet api_key = \"sk_live_abc123xyz789\";\n\n\n✅ Fix: Use Vault references\n\n// GOOD: Secret from Vault\nlet api_key = vault://application/api/key;\n\n❌ Anti-Pattern 6: No Input Validation\n// BAD: No validation\nagent bad_agent(input: String) -> String {\n    return execute_command(input);  // Command injection risk!\n}\n\n\n✅ Fix: Always validate input\n\nagent good_agent(input: String) -> String {\n    // Validate input\n    if !is_valid_input(input) {\n        return error(\"Invalid input\");\n    }\n\n    // Sanitize before use\n    let sanitized = sanitize(input);\n    return safe_process(sanitized);\n}\n\n❌ Anti-Pattern 7: 
Wrong Sandbox Tier\n// BAD: Processing untrusted code in Tier1\nagent code_runner(untrusted_code: String) -> String {\n    with sandbox = \"Tier1\" {  // Not enough isolation!\n        return eval(untrusted_code);\n    }\n}\n\n\n✅ Fix: Use appropriate sandbox tier\n\nagent code_runner(untrusted_code: String) -> String {\n    policy strict_isolation {\n        deny: [\"file_access\", \"network_access\"]\n        require: {sandbox_tier: \"Tier3\"}\n    }\n\n    with sandbox = \"Tier3\" {  // Firecracker microVM\n        return safe_eval(untrusted_code);\n    }\n}\n\n❌ Anti-Pattern 8: No Error Handling\n// BAD: Unhandled errors will crash agent\nagent fragile_agent(url: String) -> String {\n    let response = http.get(url);  // What if this fails?\n    return response.body;\n}\n\n\n✅ Fix: Always handle errors\n\nagent robust_agent(url: String) -> String {\n    try {\n        let response = http.get(url, timeout = 5000);\n\n        if response.status != 200 {\n            return error(\"HTTP error: \" + response.status);\n        }\n\n        return response.body;\n\n    } catch (error) {\n        log(\"ERROR\", \"Request failed: \" + error.message);\n        return error(\"Request failed\");\n    }\n}\n\nValidation Checklist\n\nBefore deploying an agent, verify:\n\nSecurity Checklist\n Policies defined for all operations\n Principle of least privilege applied (deny by default)\n Sandbox tier appropriate for workload\n Secrets referenced via Vault (never hardcoded)\n Input validation present for all user inputs\n Output sanitization prevents injection attacks\n No sensitive data in audit logs\nResource Management\n Timeout configured appropriately\n Memory limits set based on workload\n CPU limits defined\n Concurrency limits for agent invocations\n Rate limiting configured for external calls\nError Handling\n Try/catch blocks around risky operations\n Error messages are informative but not leaking secrets\n Retry logic for transient failures\n Circuit breakers for 
cascading failures\n Graceful degradation when dependencies fail\nCompliance & Audit\n Audit logging configured\n Compliance tags added (HIPAA, SOC2, GDPR as needed)\n Retention policies set appropriately\n PII handling follows regulations\n Data encryption at rest and in transit\nTesting\n Unit tests for core functions\n Integration tests with dependencies\n Security tests (injection, overflow, etc.)\n Performance tests (within resource limits)\n Chaos tests (failure scenarios)\nQuick Reference\nBuilt-in Functions\nCategory\tFunction\tPurpose\nString\tlen(s)\tString length\n\tto_upper(s)\tConvert to uppercase\n\tto_lower(s)\tConvert to lowercase\n\ttrim(s)\tRemove whitespace\n\tsplit(s, delim)\tSplit string\n\tcontains(s, substr)\tCheck substring\nJSON\tjson.parse(s)\tParse JSON string\n\tjson.stringify(obj)\tConvert to JSON\n\tjson.validate(s, schema)\tValidate against schema\nHTTP\thttp.get(url, opts)\tHTTP GET request\n\thttp.post(url, body, opts)\tHTTP POST request\n\tverify_hmac_sha256(data, secret, sig)\tVerify HMAC signature\nCrypto\tencrypt(data)\tAES-256-GCM encrypt\n\tdecrypt(data)\tAES-256-GCM decrypt\n\thash_sha256(data)\tSHA-256 hash\n\tsign(data)\tEd25519 signature\nTime\tnow()\tCurrent timestamp\n\tsleep(ms)\tSleep for milliseconds\n\tformat_time(ts, fmt)\tFormat timestamp\nLogging\tlog(level, msg)\tLog message\n\tdebug(msg)\tDebug log\n\tinfo(msg)\tInfo log\n\twarn(msg)\tWarning log\n\terror(msg)\tError log\nValidation\tis_valid_email(s)\tEmail validation\n\tis_valid_url(s)\tURL validation\n\tis_valid_json(s)\tJSON validation\nArrays\tpush(arr, item)\tAdd to array\n\tpop(arr)\tRemove from array\n\tmap(arr, fn)\tMap function\n\tfilter(arr, fn)\tFilter array\n\treduce(arr, fn, init)\tReduce array\nResource Limit Recommendations\nWorkload Type\tMemory\tCPU\tTimeout\tSandbox\nData Validation\t256MB\t0.5\t5s\tTier1\nFormat Conversion\t512MB\t1.0\t10s\tTier1\nAPI Integration\t512MB\t1.0\t15s\tTier1\nCode Analysis\t1GB\t2.0\t30s\tTier2\nSecurity 
Scan\t2GB\t2.0\t60s\tTier2\nML Inference\t4GB\t4.0\t120s\tTier2\nWorkflow Orchestration\t1GB\t1.0\t600s\tTier1\nUntrusted Code\t512MB\t1.0\t10s\tTier3\nCommon Error Codes\nCode\tMeaning\tResolution\nPOLICY_VIOLATION\tOperation denied by policy\tCheck policy allow/deny rules\nRESOURCE_EXCEEDED\tResource limit reached\tIncrease limits or optimize code\nTIMEOUT\tExecution timeout\tIncrease timeout or optimize\nAUTH_FAILED\tAuthentication failed\tCheck Vault credentials\nSIGNATURE_INVALID\tCrypto signature invalid\tVerify tool signature\nSANDBOX_ERROR\tSandbox isolation failed\tCheck sandbox tier compatibility\nVALIDATION_ERROR\tInput validation failed\tFix input data format\nNETWORK_ERROR\tNetwork request failed\tCheck endpoint and connectivity\nDocumentation Links\nFull DSL Guide: docs/dsl-guide.md\nDSL Specification: docs/dsl-specification.md\nReasoning Loop Guide: docs/reasoning-loop.md\nExample Agents: agents/README.md (8 production examples)\nRuntime Architecture: docs/runtime-architecture.md\nAPI Reference: docs/api-reference.md\nSecurity Model: docs/security-model.md\nGetting Started: docs/getting-started.md\nPro Tips for AI Assistants\nAlways Start with Security: Design policies before implementation\nUse Example Agents: Adapt from the 8 production agents in /agents/\nValidate Early: Add input validation at the beginning\nHandle Errors Gracefully: Wrap risky operations in try/catch\nLog Thoughtfully: Audit what matters, never log secrets\nChoose Right Sandbox: Match tier to threat model\nTest Incrementally: Start simple, add features with tests\nDocument Assumptions: Comment complex policy logic\nMonitor Resources: Set realistic limits based on workload\nReview Before Deploy: Use the validation checklist\n\nEnd of SKILL.md\n\nThis guide prioritizes security, compliance, and best practices for building production-grade Symbiont agents."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/jaschadub/symbiont",
    "publisherUrl": "https://clawhub.ai/jaschadub/symbiont",
    "owner": "jaschadub",
    "version": "1.0.2",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/symbiont",
    "downloadUrl": "https://openagent3.xyz/downloads/symbiont",
    "agentUrl": "https://openagent3.xyz/skills/symbiont/agent",
    "manifestUrl": "https://openagent3.xyz/skills/symbiont/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/symbiont/agent.md"
  }
}