{
  "schemaVersion": "1.0",
  "item": {
    "slug": "senior-prompt-engineer",
    "name": "Senior Prompt Engineer",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/alirezarezvani/senior-prompt-engineer",
    "canonicalUrl": "https://clawhub.ai/alirezarezvani/senior-prompt-engineer",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/senior-prompt-engineer",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=senior-prompt-engineer",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "references/agentic_system_design.md",
      "references/llm_evaluation_frameworks.md",
      "references/prompt_engineering_patterns.md",
      "scripts/agent_orchestrator.py",
      "scripts/prompt_optimizer.py"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=network",
        "contentDisposition": "attachment; filename=\"network-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/senior-prompt-engineer"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/senior-prompt-engineer",
    "agentPageUrl": "https://openagent3.xyz/skills/senior-prompt-engineer/agent",
    "manifestUrl": "https://openagent3.xyz/skills/senior-prompt-engineer/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/senior-prompt-engineer/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Senior Prompt Engineer",
        "body": "Prompt engineering patterns, LLM evaluation frameworks, and agentic system design."
      },
      {
        "title": "Table of Contents",
        "body": "Quick Start\nTools Overview\n\nPrompt Optimizer\nRAG Evaluator\nAgent Orchestrator\n\n\nPrompt Engineering Workflows\n\nPrompt Optimization Workflow\nFew-Shot Example Design\nStructured Output Design\n\n\nReference Documentation\nCommon Patterns Quick Reference"
      },
      {
        "title": "Quick Start",
        "body": "# Analyze and optimize a prompt file\npython scripts/prompt_optimizer.py prompts/my_prompt.txt --analyze\n\n# Evaluate RAG retrieval quality\npython scripts/rag_evaluator.py --contexts contexts.json --questions questions.json\n\n# Visualize agent workflow from definition\npython scripts/agent_orchestrator.py agent_config.yaml --visualize"
      },
      {
        "title": "1. Prompt Optimizer",
        "body": "Analyzes prompts for token efficiency, clarity, and structure. Generates optimized versions.\n\nInput: Prompt text file or string\nOutput: Analysis report with optimization suggestions\n\nUsage:\n\n# Analyze a prompt file\npython scripts/prompt_optimizer.py prompt.txt --analyze\n\n# Output:\n# Token count: 847\n# Estimated cost: $0.0025 (GPT-4)\n# Clarity score: 72/100\n# Issues found:\n#   - Ambiguous instruction at line 3\n#   - Missing output format specification\n#   - Redundant context (lines 12-15 repeat lines 5-8)\n# Suggestions:\n#   1. Add explicit output format: \"Respond in JSON with keys: ...\"\n#   2. Remove redundant context to save 89 tokens\n#   3. Clarify \"analyze\" -> \"list the top 3 issues with severity ratings\"\n\n# Generate optimized version\npython scripts/prompt_optimizer.py prompt.txt --optimize --output optimized.txt\n\n# Count tokens for cost estimation\npython scripts/prompt_optimizer.py prompt.txt --tokens --model gpt-4\n\n# Extract and manage few-shot examples\npython scripts/prompt_optimizer.py prompt.txt --extract-examples --output examples.json"
      },
      {
        "title": "2. RAG Evaluator",
        "body": "Evaluates Retrieval-Augmented Generation quality by measuring context relevance and answer faithfulness.\n\nInput: Retrieved contexts (JSON) and questions/answers\nOutput: Evaluation metrics and quality report\n\nUsage:\n\n# Evaluate retrieval quality\npython scripts/rag_evaluator.py --contexts retrieved.json --questions eval_set.json\n\n# Output:\n# === RAG Evaluation Report ===\n# Questions evaluated: 50\n#\n# Retrieval Metrics:\n#   Context Relevance: 0.78 (target: >0.80)\n#   Retrieval Precision@5: 0.72\n#   Coverage: 0.85\n#\n# Generation Metrics:\n#   Answer Faithfulness: 0.91\n#   Groundedness: 0.88\n#\n# Issues Found:\n#   - 8 questions had no relevant context in top-5\n#   - 3 answers contained information not in context\n#\n# Recommendations:\n#   1. Improve chunking strategy for technical documents\n#   2. Add metadata filtering for date-sensitive queries\n\n# Evaluate with custom metrics\npython scripts/rag_evaluator.py --contexts retrieved.json --questions eval_set.json \\\n    --metrics relevance,faithfulness,coverage\n\n# Export detailed results\npython scripts/rag_evaluator.py --contexts retrieved.json --questions eval_set.json \\\n    --output report.json --verbose"
      },
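      {
        "title": "Example: RAG Evaluator Input Files (Assumed Shape)",
        "body": "The evaluator above takes --contexts and --questions JSON files, but this listing does not document their schema. A minimal sketch of plausible shapes, inferred from the report fields (per-question retrieval lists, reference answers); every key name here is an assumption, so confirm against SKILL.md and the bundled script before relying on it.\n\ncontexts.json (assumed):\n[\n  {\n    \"question_id\": \"q1\",\n    \"retrieved\": [\n      {\"rank\": 1, \"text\": \"...\"},\n      {\"rank\": 2, \"text\": \"...\"}\n    ]\n  }\n]\n\nquestions.json (assumed):\n[\n  {\"id\": \"q1\", \"question\": \"...\", \"reference_answer\": \"...\"}\n]"
      },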
      {
        "title": "3. Agent Orchestrator",
        "body": "Parses agent definitions and visualizes execution flows. Validates tool configurations.\n\nInput: Agent configuration (YAML/JSON)\nOutput: Workflow visualization, validation report\n\nUsage:\n\n# Validate agent configuration\npython scripts/agent_orchestrator.py agent.yaml --validate\n\n# Output:\n# === Agent Validation Report ===\n# Agent: research_assistant\n# Pattern: ReAct\n#\n# Tools (4 registered):\n#   [OK] web_search - API key configured\n#   [OK] calculator - No config needed\n#   [WARN] file_reader - Missing allowed_paths\n#   [OK] summarizer - Prompt template valid\n#\n# Flow Analysis:\n#   Max depth: 5 iterations\n#   Estimated tokens/run: 2,400-4,800\n#   Potential infinite loop: No\n#\n# Recommendations:\n#   1. Add allowed_paths to file_reader for security\n#   2. Consider adding early exit condition for simple queries\n\n# Visualize agent workflow (ASCII)\npython scripts/agent_orchestrator.py agent.yaml --visualize\n\n# Output:\n# ┌─────────────────────────────────────────┐\n# │            research_assistant           │\n# │              (ReAct Pattern)            │\n# └─────────────────┬───────────────────────┘\n#                   │\n#          ┌────────▼────────┐\n#          │   User Query    │\n#          └────────┬────────┘\n#                   │\n#          ┌────────▼────────┐\n#          │     Think       │◄──────┐\n#          └────────┬────────┘       │\n#                   │                │\n#          ┌────────▼────────┐       │\n#          │   Select Tool   │       │\n#          └────────┬────────┘       │\n#                   │                │\n#     ┌─────────────┼─────────────┐  │\n#     ▼             ▼             ▼  │\n# [web_search] [calculator] [file_reader]\n#     │             │             │  │\n#     └─────────────┼─────────────┘  │\n#                   │                │\n#          ┌────────▼────────┐       │\n#          │    Observe      │───────┘\n#          └────────┬────────┘\n#                   │\n#          ┌────────▼────────┐\n#          │  Final Answer   │\n#          └─────────────────┘\n\n# Export workflow as Mermaid diagram\npython scripts/agent_orchestrator.py agent.yaml --visualize --format mermaid"
      },
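      {
        "title": "Example: Hypothetical agent.yaml",
        "body": "The orchestrator above reads an agent configuration, but no sample file ships with this listing. A minimal sketch of what agent.yaml might look like, reverse-engineered from the validation report; every field name is a guess, so treat scripts/agent_orchestrator.py as the source of truth.\n\n# Hypothetical configuration -- field names inferred from the validation\n# report output above, not from a documented schema.\nname: research_assistant\npattern: react\nmax_iterations: 5\ntools:\n  - name: web_search\n    api_key_env: SEARCH_API_KEY   # assumed; the report only says \"API key configured\"\n  - name: calculator\n  - name: file_reader\n    allowed_paths:                # the validator warns when this is missing\n      - ./data\n  - name: summarizer\n    prompt_template: prompts/summarize.txt"
      },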
      {
        "title": "Prompt Optimization Workflow",
        "body": "Use when improving an existing prompt's performance or reducing token costs.\n\nStep 1: Baseline current prompt\n\npython scripts/prompt_optimizer.py current_prompt.txt --analyze --output baseline.json\n\nStep 2: Identify issues\nReview the analysis report for:\n\nToken waste (redundant instructions, verbose examples)\nAmbiguous instructions (unclear output format, vague verbs)\nMissing constraints (no length limits, no format specification)\n\nStep 3: Apply optimization patterns\n\nIssuePattern to ApplyAmbiguous outputAdd explicit format specificationToo verboseExtract to few-shot examplesInconsistent resultsAdd role/persona framingMissing edge casesAdd constraint boundaries\n\nStep 4: Generate optimized version\n\npython scripts/prompt_optimizer.py current_prompt.txt --optimize --output optimized.txt\n\nStep 5: Compare results\n\npython scripts/prompt_optimizer.py optimized.txt --analyze --compare baseline.json\n# Shows: token reduction, clarity improvement, issues resolved\n\nStep 6: Validate with test cases\nRun both prompts against your evaluation set and compare outputs."
      },
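      {
        "title": "Example: Independent Token Count Check",
        "body": "If you want to sanity-check the optimizer's token counts from Step 1 outside the bundled script, a minimal sketch using the third-party tiktoken library (not part of this package; the file path is a placeholder):\n\nimport tiktoken\n\ndef count_tokens(path: str, model: str = \"gpt-4\") -> int:\n    # Load the tokenizer tiktoken maps to the target model.\n    enc = tiktoken.encoding_for_model(model)\n    with open(path, encoding=\"utf-8\") as f:\n        return len(enc.encode(f.read()))\n\nprint(count_tokens(\"current_prompt.txt\"))"
      },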
      {
        "title": "Few-Shot Example Design Workflow",
        "body": "Use when creating examples for in-context learning.\n\nStep 1: Define the task clearly\n\nTask: Extract product entities from customer reviews\nInput: Review text\nOutput: JSON with {product_name, sentiment, features_mentioned}\n\nStep 2: Select diverse examples (3-5 recommended)\n\nExample TypePurposeSimple caseShows basic patternEdge caseHandles ambiguityComplex caseMultiple entitiesNegative caseWhat NOT to extract\n\nStep 3: Format consistently\n\nExample 1:\nInput: \"Love my new iPhone 15, the camera is amazing!\"\nOutput: {\"product_name\": \"iPhone 15\", \"sentiment\": \"positive\", \"features_mentioned\": [\"camera\"]}\n\nExample 2:\nInput: \"The laptop was okay but battery life is terrible.\"\nOutput: {\"product_name\": \"laptop\", \"sentiment\": \"mixed\", \"features_mentioned\": [\"battery life\"]}\n\nStep 4: Validate example quality\n\npython scripts/prompt_optimizer.py prompt_with_examples.txt --validate-examples\n# Checks: consistency, coverage, format alignment\n\nStep 5: Test with held-out cases\nEnsure model generalizes beyond your examples."
      },
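      {
        "title": "Example: Assembling Extracted Examples into a Prompt",
        "body": "prompt_optimizer.py --extract-examples writes examples.json, but its schema is not documented in this listing. A minimal sketch that formats examples in the Step 3 style, assuming the file is a list of {\"input\": ..., \"output\": ...} records; inspect a real file and adjust before use.\n\nimport json\n\nwith open(\"examples.json\", encoding=\"utf-8\") as f:\n    examples = json.load(f)  # ASSUMED: list of {\"input\": ..., \"output\": ...} records\n\nblocks = []\nfor i, ex in enumerate(examples[:5], start=1):  # 3-5 examples, per Step 2\n    blocks.append(f\"Example {i}:\")\n    blocks.append(f\"Input: {ex['input']}\")\n    blocks.append(f\"Output: {ex['output']}\")\n    blocks.append(\"\")\n\nprompt = \"\\n\".join(blocks) + \"Now handle this case:\"\nprint(prompt)"
      },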
      {
        "title": "Structured Output Design Workflow",
        "body": "Use when you need reliable JSON/XML/structured responses.\n\nStep 1: Define schema\n\n{\n  \"type\": \"object\",\n  \"properties\": {\n    \"summary\": {\"type\": \"string\", \"maxLength\": 200},\n    \"sentiment\": {\"enum\": [\"positive\", \"negative\", \"neutral\"]},\n    \"confidence\": {\"type\": \"number\", \"minimum\": 0, \"maximum\": 1}\n  },\n  \"required\": [\"summary\", \"sentiment\"]\n}\n\nStep 2: Include schema in prompt\n\nRespond with JSON matching this schema:\n- summary (string, max 200 chars): Brief summary of the content\n- sentiment (enum): One of \"positive\", \"negative\", \"neutral\"\n- confidence (number 0-1): Your confidence in the sentiment\n\nStep 3: Add format enforcement\n\nIMPORTANT: Respond ONLY with valid JSON. No markdown, no explanation.\nStart your response with { and end with }\n\nStep 4: Validate outputs\n\npython scripts/prompt_optimizer.py structured_prompt.txt --validate-schema schema.json"
      },
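      {
        "title": "Example: Validating Model Output in Your Own Pipeline",
        "body": "The --validate-schema flag in Step 4 covers validation inside the toolkit; to check live model responses in your own code, a minimal sketch using the third-party jsonschema library (not bundled here; the sample response is illustrative):\n\nimport json\nfrom jsonschema import ValidationError, validate\n\nwith open(\"schema.json\", encoding=\"utf-8\") as f:\n    schema = json.load(f)  # the Step 1 schema\n\nraw = '{\"summary\": \"Revenue grew 12% quarter over quarter.\", \"sentiment\": \"positive\", \"confidence\": 0.9}'\ntry:\n    validate(instance=json.loads(raw), schema=schema)  # raises on mismatch\n    print(\"output matches schema\")\nexcept (json.JSONDecodeError, ValidationError) as err:\n    print(f\"invalid output: {err}\")"
      },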
      {
        "title": "Reference Documentation",
        "body": "FileContainsLoad when user asks aboutreferences/prompt_engineering_patterns.md10 prompt patterns with input/output examples\"which pattern?\", \"few-shot\", \"chain-of-thought\", \"role prompting\"references/llm_evaluation_frameworks.mdEvaluation metrics, scoring methods, A/B testing\"how to evaluate?\", \"measure quality\", \"compare prompts\"references/agentic_system_design.mdAgent architectures (ReAct, Plan-Execute, Tool Use)\"build agent\", \"tool calling\", \"multi-agent\""
      },
      {
        "title": "Common Patterns Quick Reference",
        "body": "PatternWhen to UseExampleZero-shotSimple, well-defined tasks\"Classify this email as spam or not spam\"Few-shotComplex tasks, consistent format neededProvide 3-5 examples before the taskChain-of-ThoughtReasoning, math, multi-step logic\"Think step by step...\"Role PromptingExpertise needed, specific perspective\"You are an expert tax accountant...\"Structured OutputNeed parseable JSON/XMLInclude schema + format enforcement"
      },
      {
        "title": "Common Commands",
        "body": "# Prompt Analysis\npython scripts/prompt_optimizer.py prompt.txt --analyze          # Full analysis\npython scripts/prompt_optimizer.py prompt.txt --tokens           # Token count only\npython scripts/prompt_optimizer.py prompt.txt --optimize         # Generate optimized version\n\n# RAG Evaluation\npython scripts/rag_evaluator.py --contexts ctx.json --questions q.json  # Evaluate\npython scripts/rag_evaluator.py --contexts ctx.json --compare baseline  # Compare to baseline\n\n# Agent Development\npython scripts/agent_orchestrator.py agent.yaml --validate       # Validate config\npython scripts/agent_orchestrator.py agent.yaml --visualize      # Show workflow\npython scripts/agent_orchestrator.py agent.yaml --estimate-cost  # Token estimation"
      }
    ],
    "body": "Senior Prompt Engineer\n\nPrompt engineering patterns, LLM evaluation frameworks, and agentic system design.\n\nTable of Contents\nQuick Start\nTools Overview\nPrompt Optimizer\nRAG Evaluator\nAgent Orchestrator\nPrompt Engineering Workflows\nPrompt Optimization Workflow\nFew-Shot Example Design\nStructured Output Design\nReference Documentation\nCommon Patterns Quick Reference\nQuick Start\n# Analyze and optimize a prompt file\npython scripts/prompt_optimizer.py prompts/my_prompt.txt --analyze\n\n# Evaluate RAG retrieval quality\npython scripts/rag_evaluator.py --contexts contexts.json --questions questions.json\n\n# Visualize agent workflow from definition\npython scripts/agent_orchestrator.py agent_config.yaml --visualize\n\nTools Overview\n1. Prompt Optimizer\n\nAnalyzes prompts for token efficiency, clarity, and structure. Generates optimized versions.\n\nInput: Prompt text file or string Output: Analysis report with optimization suggestions\n\nUsage:\n\n# Analyze a prompt file\npython scripts/prompt_optimizer.py prompt.txt --analyze\n\n# Output:\n# Token count: 847\n# Estimated cost: $0.0025 (GPT-4)\n# Clarity score: 72/100\n# Issues found:\n#   - Ambiguous instruction at line 3\n#   - Missing output format specification\n#   - Redundant context (lines 12-15 repeat lines 5-8)\n# Suggestions:\n#   1. Add explicit output format: \"Respond in JSON with keys: ...\"\n#   2. Remove redundant context to save 89 tokens\n#   3. Clarify \"analyze\" -> \"list the top 3 issues with severity ratings\"\n\n# Generate optimized version\npython scripts/prompt_optimizer.py prompt.txt --optimize --output optimized.txt\n\n# Count tokens for cost estimation\npython scripts/prompt_optimizer.py prompt.txt --tokens --model gpt-4\n\n# Extract and manage few-shot examples\npython scripts/prompt_optimizer.py prompt.txt --extract-examples --output examples.json\n\n2. RAG Evaluator\n\nEvaluates Retrieval-Augmented Generation quality by measuring context relevance and answer faithfulness.\n\nInput: Retrieved contexts (JSON) and questions/answers Output: Evaluation metrics and quality report\n\nUsage:\n\n# Evaluate retrieval quality\npython scripts/rag_evaluator.py --contexts retrieved.json --questions eval_set.json\n\n# Output:\n# === RAG Evaluation Report ===\n# Questions evaluated: 50\n#\n# Retrieval Metrics:\n#   Context Relevance: 0.78 (target: >0.80)\n#   Retrieval Precision@5: 0.72\n#   Coverage: 0.85\n#\n# Generation Metrics:\n#   Answer Faithfulness: 0.91\n#   Groundedness: 0.88\n#\n# Issues Found:\n#   - 8 questions had no relevant context in top-5\n#   - 3 answers contained information not in context\n#\n# Recommendations:\n#   1. Improve chunking strategy for technical documents\n#   2. Add metadata filtering for date-sensitive queries\n\n# Evaluate with custom metrics\npython scripts/rag_evaluator.py --contexts retrieved.json --questions eval_set.json \\\n    --metrics relevance,faithfulness,coverage\n\n# Export detailed results\npython scripts/rag_evaluator.py --contexts retrieved.json --questions eval_set.json \\\n    --output report.json --verbose\n\n3. Agent Orchestrator\n\nParses agent definitions and visualizes execution flows. 
Validates tool configurations.\n\nInput: Agent configuration (YAML/JSON) Output: Workflow visualization, validation report\n\nUsage:\n\n# Validate agent configuration\npython scripts/agent_orchestrator.py agent.yaml --validate\n\n# Output:\n# === Agent Validation Report ===\n# Agent: research_assistant\n# Pattern: ReAct\n#\n# Tools (4 registered):\n#   [OK] web_search - API key configured\n#   [OK] calculator - No config needed\n#   [WARN] file_reader - Missing allowed_paths\n#   [OK] summarizer - Prompt template valid\n#\n# Flow Analysis:\n#   Max depth: 5 iterations\n#   Estimated tokens/run: 2,400-4,800\n#   Potential infinite loop: No\n#\n# Recommendations:\n#   1. Add allowed_paths to file_reader for security\n#   2. Consider adding early exit condition for simple queries\n\n# Visualize agent workflow (ASCII)\npython scripts/agent_orchestrator.py agent.yaml --visualize\n\n# Output:\n# ┌─────────────────────────────────────────┐\n# │            research_assistant           │\n# │              (ReAct Pattern)            │\n# └─────────────────┬───────────────────────┘\n#                   │\n#          ┌────────▼────────┐\n#          │   User Query    │\n#          └────────┬────────┘\n#                   │\n#          ┌────────▼────────┐\n#          │     Think       │◄──────┐\n#          └────────┬────────┘       │\n#                   │                │\n#          ┌────────▼────────┐       │\n#          │   Select Tool   │       │\n#          └────────┬────────┘       │\n#                   │                │\n#     ┌─────────────┼─────────────┐  │\n#     ▼             ▼             ▼  │\n# [web_search] [calculator] [file_reader]\n#     │             │             │  │\n#     └─────────────┼─────────────┘  │\n#                   │                │\n#          ┌────────▼────────┐       │\n#          │    Observe      │───────┘\n#          └────────┬────────┘\n#                   │\n#          ┌────────▼────────┐\n#          │  Final Answer   │\n#          └─────────────────┘\n\n# Export workflow as Mermaid diagram\npython scripts/agent_orchestrator.py agent.yaml --visualize --format mermaid\n\nPrompt Engineering Workflows\nPrompt Optimization Workflow\n\nUse when improving an existing prompt's performance or reducing token costs.\n\nStep 1: Baseline current prompt\n\npython scripts/prompt_optimizer.py current_prompt.txt --analyze --output baseline.json\n\n\nStep 2: Identify issues Review the analysis report for:\n\nToken waste (redundant instructions, verbose examples)\nAmbiguous instructions (unclear output format, vague verbs)\nMissing constraints (no length limits, no format specification)\n\nStep 3: Apply optimization patterns\n\nIssue\tPattern to Apply\nAmbiguous output\tAdd explicit format specification\nToo verbose\tExtract to few-shot examples\nInconsistent results\tAdd role/persona framing\nMissing edge cases\tAdd constraint boundaries\n\nStep 4: Generate optimized version\n\npython scripts/prompt_optimizer.py current_prompt.txt --optimize --output optimized.txt\n\n\nStep 5: Compare results\n\npython scripts/prompt_optimizer.py optimized.txt --analyze --compare baseline.json\n# Shows: token reduction, clarity improvement, issues resolved\n\n\nStep 6: Validate with test cases Run both prompts against your evaluation set and compare outputs.\n\nFew-Shot Example Design Workflow\n\nUse when creating examples for in-context learning.\n\nStep 1: Define the task clearly\n\nTask: Extract product entities from customer reviews\nInput: Review text\nOutput: JSON with {product_name, 
sentiment, features_mentioned}\n\n\nStep 2: Select diverse examples (3-5 recommended)\n\nExample Type\tPurpose\nSimple case\tShows basic pattern\nEdge case\tHandles ambiguity\nComplex case\tMultiple entities\nNegative case\tWhat NOT to extract\n\nStep 3: Format consistently\n\nExample 1:\nInput: \"Love my new iPhone 15, the camera is amazing!\"\nOutput: {\"product_name\": \"iPhone 15\", \"sentiment\": \"positive\", \"features_mentioned\": [\"camera\"]}\n\nExample 2:\nInput: \"The laptop was okay but battery life is terrible.\"\nOutput: {\"product_name\": \"laptop\", \"sentiment\": \"mixed\", \"features_mentioned\": [\"battery life\"]}\n\n\nStep 4: Validate example quality\n\npython scripts/prompt_optimizer.py prompt_with_examples.txt --validate-examples\n# Checks: consistency, coverage, format alignment\n\n\nStep 5: Test with held-out cases Ensure model generalizes beyond your examples.\n\nStructured Output Design Workflow\n\nUse when you need reliable JSON/XML/structured responses.\n\nStep 1: Define schema\n\n{\n  \"type\": \"object\",\n  \"properties\": {\n    \"summary\": {\"type\": \"string\", \"maxLength\": 200},\n    \"sentiment\": {\"enum\": [\"positive\", \"negative\", \"neutral\"]},\n    \"confidence\": {\"type\": \"number\", \"minimum\": 0, \"maximum\": 1}\n  },\n  \"required\": [\"summary\", \"sentiment\"]\n}\n\n\nStep 2: Include schema in prompt\n\nRespond with JSON matching this schema:\n- summary (string, max 200 chars): Brief summary of the content\n- sentiment (enum): One of \"positive\", \"negative\", \"neutral\"\n- confidence (number 0-1): Your confidence in the sentiment\n\n\nStep 3: Add format enforcement\n\nIMPORTANT: Respond ONLY with valid JSON. No markdown, no explanation.\nStart your response with { and end with }\n\n\nStep 4: Validate outputs\n\npython scripts/prompt_optimizer.py structured_prompt.txt --validate-schema schema.json\n\nReference Documentation\nFile\tContains\tLoad when user asks about\nreferences/prompt_engineering_patterns.md\t10 prompt patterns with input/output examples\t\"which pattern?\", \"few-shot\", \"chain-of-thought\", \"role prompting\"\nreferences/llm_evaluation_frameworks.md\tEvaluation metrics, scoring methods, A/B testing\t\"how to evaluate?\", \"measure quality\", \"compare prompts\"\nreferences/agentic_system_design.md\tAgent architectures (ReAct, Plan-Execute, Tool Use)\t\"build agent\", \"tool calling\", \"multi-agent\"\nCommon Patterns Quick Reference\nPattern\tWhen to Use\tExample\nZero-shot\tSimple, well-defined tasks\t\"Classify this email as spam or not spam\"\nFew-shot\tComplex tasks, consistent format needed\tProvide 3-5 examples before the task\nChain-of-Thought\tReasoning, math, multi-step logic\t\"Think step by step...\"\nRole Prompting\tExpertise needed, specific perspective\t\"You are an expert tax accountant...\"\nStructured Output\tNeed parseable JSON/XML\tInclude schema + format enforcement\nCommon Commands\n# Prompt Analysis\npython scripts/prompt_optimizer.py prompt.txt --analyze          # Full analysis\npython scripts/prompt_optimizer.py prompt.txt --tokens           # Token count only\npython scripts/prompt_optimizer.py prompt.txt --optimize         # Generate optimized version\n\n# RAG Evaluation\npython scripts/rag_evaluator.py --contexts ctx.json --questions q.json  # Evaluate\npython scripts/rag_evaluator.py --contexts ctx.json --compare baseline  # Compare to baseline\n\n# Agent Development\npython scripts/agent_orchestrator.py agent.yaml --validate       # Validate config\npython 
scripts/agent_orchestrator.py agent.yaml --visualize      # Show workflow\npython scripts/agent_orchestrator.py agent.yaml --estimate-cost  # Token estimation"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/alirezarezvani/senior-prompt-engineer",
    "publisherUrl": "https://clawhub.ai/alirezarezvani/senior-prompt-engineer",
    "owner": "alirezarezvani",
    "version": "2.1.1",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/senior-prompt-engineer",
    "downloadUrl": "https://openagent3.xyz/downloads/senior-prompt-engineer",
    "agentUrl": "https://openagent3.xyz/skills/senior-prompt-engineer/agent",
    "manifestUrl": "https://openagent3.xyz/skills/senior-prompt-engineer/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/senior-prompt-engineer/agent.md"
  }
}