{
  "schemaVersion": "1.0",
  "item": {
    "slug": "afrexai-ux-research-engine",
    "name": "UX Research Engine",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/1kalin/afrexai-ux-research-engine",
    "canonicalUrl": "https://clawhub.ai/1kalin/afrexai-ux-research-engine",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/afrexai-ux-research-engine",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-ux-research-engine",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-07T17:22:31.273Z",
      "expiresAt": "2026-05-14T17:22:31.273Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-annual-report",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-annual-report",
        "contentDisposition": "attachment; filename=\"afrexai-annual-report-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/afrexai-ux-research-engine"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/afrexai-ux-research-engine",
    "agentPageUrl": "https://openagent3.xyz/skills/afrexai-ux-research-engine/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-ux-research-engine/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-ux-research-engine/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "UX Research Engine ⚡",
        "body": "Complete UX research methodology — from discovery to validated design decisions. No scripts, no APIs, no dependencies. Pure agent skill."
      },
      {
        "title": "Research Brief YAML",
        "body": "project: \"[Product/Feature Name]\"\nresearch_question: \"[What do we need to learn?]\"\nbusiness_context:\n  objective: \"[Business goal this research supports]\"\n  decision: \"[What decision will this research inform?]\"\n  stakeholders: [\"PM\", \"Design Lead\", \"Engineering\"]\n  deadline: \"YYYY-MM-DD\"\nscope:\n  product_area: \"[Feature/flow being studied]\"\n  user_segment: \"[Who are we studying?]\"\n  geographic: \"[Regions/markets]\"\nmethodology: \"[See selection matrix below]\"\nsample_size: \"[See calculator below]\"\ntimeline:\n  planning: \"Week 1\"\n  recruiting: \"Week 1-2\"\n  fieldwork: \"Week 2-3\"\n  analysis: \"Week 3-4\"\n  reporting: \"Week 4\"\nbudget:\n  participant_incentives: \"$X\"\n  tools: \"$X\"\n  total: \"$X\"\nsuccess_criteria:\n  - \"[Specific insight we need]\"\n  - \"[Confidence level required]\"\n  - \"[Actionable output format]\""
      },
      {
        "title": "Method Selection Matrix",
        "body": "MethodBest ForSample SizeTimeCostConfidenceUser InterviewsDeep \"why\" understanding, exploring unknowns5-152-4 weeks$$High (qualitative)Usability TestingFinding interaction problems, validating flows5-8 per round1-2 weeks$$High (behavioral)SurveysQuantifying attitudes, measuring satisfaction100-400+1-2 weeks$High (statistical)Card SortingInformation architecture, navigation labels15-30 (open), 30+ (closed)1 week$MediumDiary StudiesLong-term behavior, context of use10-152-6 weeks$$$High (longitudinal)A/B TestingComparing specific design variants1000+ per variant1-4 weeks$Very HighContextual InquiryUnderstanding real environment, workflows4-82-3 weeks$$$Very HighTree TestingValidating IA without visual design50+1 week$HighFirst-Click TestingNavigation effectiveness30-501 week$MediumConcept TestingEarly-stage idea validation8-151-2 weeks$$MediumHeuristic EvaluationExpert review of existing UI3-5 evaluators2-3 days$MediumCompetitive UX AuditUnderstanding market standardsN/A1 week$Low-Medium"
      },
      {
        "title": "Decision Tree: Which Method?",
        "body": "Do you know WHAT the problem is?\n├── NO → Generative Research\n│   ├── Need context? → Contextual Inquiry\n│   ├── Need attitudes? → User Interviews\n│   ├── Need behaviors over time? → Diary Study\n│   └── Need broad patterns? → Survey (exploratory)\n│\n└── YES → Evaluative Research\n    ├── Have a prototype/product?\n    │   ├── YES → Usability Testing\n    │   │   ├── Early concept → Concept Test (paper/low-fi)\n    │   │   ├── Key flow → Task-based Usability Test\n    │   │   └── Comparing options → A/B Test\n    │   └── NO → \n    │       ├── Testing IA → Card Sort / Tree Test\n    │       └── Testing content → First-Click Test\n    └── Need expert opinion fast? → Heuristic Evaluation"
      },
      {
        "title": "Sample Size Calculator",
        "body": "Qualitative (interviews, usability):\n\n5 users find ~85% of usability issues (Nielsen)\n8-12 for thematic saturation in interviews\n15+ for diverse populations or complex domains\nRule: keep going until you hear the same things 3x\n\nQuantitative (surveys):\n\nPopulation90% Confidence ±5%95% Confidence ±5%99% Confidence ±5%1007480875001762172851,00021427839910,000264370622100,000+271384660\n\nA/B Tests:\n\nMDE (Minimum Detectable Effect) drives sample size\n5% MDE, 80% power, 95% confidence → ~1,600 per variant\n2% MDE → ~10,000 per variant\nAlways run for full business cycles (min 1 week)"
      },
      {
        "title": "Screener Template",
        "body": "screener:\n  title: \"[Study Name] Participant Screener\"\n  target_profile:\n    demographics:\n      age_range: \"[e.g., 25-45]\"\n      location: \"[e.g., US-based]\"\n      language: \"[e.g., English-fluent]\"\n    behavioral:\n      product_usage: \"[e.g., Uses [product] 3+ times/week]\"\n      experience_level: \"[e.g., 1+ year with similar tools]\"\n      recent_activity: \"[e.g., Made a purchase in last 30 days]\"\n    psychographic:\n      decision_maker: \"[e.g., Primary household purchaser]\"\n      tech_comfort: \"[e.g., Comfortable with mobile apps]\"\n  \n  screening_questions:\n    - question: \"How often do you use [product category]?\"\n      type: \"single-select\"\n      options: [\"Daily\", \"Weekly\", \"Monthly\", \"Rarely\", \"Never\"]\n      qualify: [\"Daily\", \"Weekly\"]\n      disqualify: [\"Never\"]\n    \n    - question: \"Which of these tools do you currently use?\"\n      type: \"multi-select\"\n      options: [\"Tool A\", \"Tool B\", \"Tool C\", \"None\"]\n      qualify_min: 1\n      \n    - question: \"What is your primary role?\"\n      type: \"single-select\"\n      options: [\"Developer\", \"Designer\", \"PM\", \"Marketing\", \"Other\"]\n      qualify: [\"Developer\", \"Designer\", \"PM\"]\n    \n    - question: \"Have you participated in a UX study in the last 6 months?\"\n      type: \"single-select\"\n      options: [\"Yes\", \"No\"]\n      disqualify: [\"Yes\"]  # Avoid professional participants\n  \n  anti-patterns:\n    - \"Works at a competitor or in UX research\"\n    - \"Family/friends of team members\"\n    - \"Participated in study for this product before\"\n  \n  incentive: \"$75 for 60-min session\"\n  \n  recruiting_channels:\n    - channel: \"Existing user database\"\n      quality: \"★★★★★\"\n      cost: \"Free\"\n    - channel: \"UserTesting.com / UserInterviews.com\"\n      quality: \"★★★★\"\n      cost: \"$50-150/participant\"\n    - channel: \"Social media recruitment\"\n      quality: 
\"★★★\"\n      cost: \"Free-$$\"\n    - channel: \"Craigslist / local posting\"\n      quality: \"★★\"\n      cost: \"$\""
      },
      {
        "title": "Recruiting Quality Checklist",
        "body": "Screener doesn't lead (no \"right\" answers obvious)\n Mix of demographics within target segment\n No more than 20% from single recruiting source\n At least 1 \"edge case\" participant (power user, new user, accessibility needs)\n Over-recruit by 20% for no-shows\n Consent form prepared and sent in advance\n Incentive delivery method confirmed"
      },
      {
        "title": "Interview Guide Template",
        "body": "# Interview Guide: [Study Name]\nDuration: 60 minutes\nModerator: [Name]\n\n## Setup (5 min)\n- Thank participant, confirm recording consent\n- \"There are no right or wrong answers — we're learning from YOUR experience\"\n- \"Feel free to be critical — honest feedback helps us improve\"\n- \"I didn't design this, so you won't hurt my feelings\"\n\n## Warm-Up (5 min)\n- \"Tell me about your role and what a typical day looks like\"\n- \"How does [product area] fit into your work?\"\n\n## Core Questions (35 min)\n\n### Context & Current Behavior\n1. \"Walk me through the last time you [did the task we're studying]\"\n   - Probe: \"What happened next?\"\n   - Probe: \"How did that make you feel?\"\n   - Probe: \"What would you have preferred to happen?\"\n\n2. \"What tools/methods do you currently use for [task]?\"\n   - Probe: \"What do you like about that approach?\"\n   - Probe: \"What frustrates you?\"\n   - Probe: \"How long have you been doing it this way?\"\n\n3. \"Can you show me how you typically [task]?\" (if remote: screen share)\n\n### Pain Points & Needs\n4. \"What's the hardest part about [task]?\"\n   - Probe: \"How often does that happen?\"\n   - Probe: \"What do you do when that happens?\"\n   - Probe: \"How much time/money does that cost you?\"\n\n5. \"If you could wave a magic wand and change one thing about [experience], what would it be?\"\n\n6. \"Tell me about a time when [process] went really wrong\"\n   - Probe: \"What was the impact?\"\n   - Probe: \"How was it resolved?\"\n\n### Mental Models\n7. \"How would you explain [concept] to a colleague?\"\n8. \"What do you expect to happen when you [action]?\"\n9. \"Where would you look for [information/feature]?\"\n\n### Priorities & Trade-offs\n10. \"If you had to choose between [speed vs accuracy / ease vs power], which matters more? 
Why?\"\n\n## Concept Reaction (10 min) — if applicable\n- Show prototype/concept\n- \"What's your first impression?\"\n- \"What would you use this for?\"\n- \"What's missing?\"\n- \"Would this replace what you currently use? Why/why not?\"\n\n## Wrap-Up (5 min)\n- \"Is there anything else about [topic] we should know?\"\n- \"Who else should we talk to about this?\"\n- Thank participant, confirm incentive delivery"
      },
      {
        "title": "Interview Quality Rules",
        "body": "80/20 rule: Participant talks 80%, you talk 20%\nNever ask \"Would you use this?\" — people can't predict future behavior\nAsk about past behavior, not hypothetical futures\nFollow the energy — when they get animated, dig deeper\nSilence is a tool — pause 5 seconds after they answer; they'll elaborate\n\"Tell me more about that\" — your most powerful phrase\nWatch for say/do gaps — note when claimed behavior contradicts observed behavior\nRecord everything — audio minimum, video ideal, notes always"
      },
      {
        "title": "Note-Taking Template (Per Interview)",
        "body": "participant:\n  id: \"P01\"\n  date: \"YYYY-MM-DD\"\n  demographics: \"[age, role, experience level]\"\n  session_duration: \"58 min\"\n\nkey_quotes:\n  - quote: \"[Exact words]\"\n    timestamp: \"12:34\"\n    context: \"[What prompted this]\"\n    theme: \"[Emerging theme tag]\"\n\nobservations:\n  behaviors:\n    - \"[What they DID, not what they said]\"\n  emotions:\n    - \"[Frustration when..., delight when..., confusion at...]\"\n  workarounds:\n    - \"[Creative solutions they've built]\"\n\npain_points:\n  - pain: \"[Specific problem]\"\n    severity: \"[1-5]\"\n    frequency: \"[daily/weekly/monthly/rarely]\"\n    current_solution: \"[How they cope]\"\n    \nneeds:\n  - need: \"[Unmet need identified]\"\n    type: \"[functional/emotional/social]\"\n    evidence: \"[Quote or behavior that reveals this]\"\n\nsurprises:\n  - \"[Anything unexpected — these are gold]\"\n\nmoderator_notes:\n  - \"[Post-session reflection, what to adjust for next interview]\""
      },
      {
        "title": "Data-Driven Persona Template",
        "body": "persona:\n  name: \"[Realistic name — not cutesy]\"\n  photo: \"[Representative stock photo description]\"\n  archetype: \"[1-3 word label, e.g., 'The Overwhelmed Manager']\"\n  \n  demographics:\n    age: \"[Range or specific]\"\n    role: \"[Job title / life stage]\"\n    experience: \"[Years with product/domain]\"\n    tech_proficiency: \"[Novice / Intermediate / Advanced / Expert]\"\n    environment: \"[Office / remote / mobile / field]\"\n  \n  # MOST IMPORTANT SECTION\n  goals:\n    primary: \"[The #1 thing they're trying to accomplish]\"\n    secondary:\n      - \"[Supporting goal]\"\n      - \"[Supporting goal]\"\n    underlying: \"[The emotional/social need behind the functional goal]\"\n  \n  frustrations:\n    - frustration: \"[Specific pain point]\"\n      frequency: \"[How often — from research data]\"\n      severity: \"[1-5]\"\n      current_workaround: \"[What they do today]\"\n      evidence: \"[P03, P07, P11 mentioned this]\"\n  \n  behaviors:\n    usage_pattern: \"[When, where, how often they engage]\"\n    decision_process: \"[How they evaluate options]\"\n    information_sources: \"[Where they learn / get help]\"\n    social_influence: \"[Who influences their decisions]\"\n    key_workflows:\n      - \"[Task 1 — frequency — duration]\"\n      - \"[Task 2 — frequency — duration]\"\n  \n  mental_models:\n    - \"[How they think about [concept] — often surprising]\"\n    - \"[Vocabulary they use — not our jargon]\"\n  \n  motivations:\n    gains: \"[What success looks like to them]\"\n    fears: \"[What failure looks like]\"\n    triggers: \"[What prompts them to act]\"\n    barriers: \"[What stops them from acting]\"\n  \n  quotes:\n    - \"\\\"[Real quote from research that captures this persona]\\\"\"\n    - \"\\\"[Another revealing quote]\\\"\"\n  \n  design_implications:\n    must_have:\n      - \"[Feature/quality this persona absolutely needs]\"\n    should_have:\n      - \"[Important but not dealbreaker]\"\n    
must_avoid:\n      - \"[Things that will drive this persona away]\"\n    communication_style: \"[How to talk to this persona]\"\n  \n  data_sources:\n    interviews: \"[# of participants who map to this persona]\"\n    survey_segment: \"[% of survey respondents]\"\n    analytics_cohort: \"[Behavioral data that identifies this group]\""
      },
      {
        "title": "Persona Validation Checklist",
        "body": "Based on real research data, not assumptions\n Represents a meaningful segment (not 1 outlier)\n Goals are specific enough to design for\n Frustrations include frequency + severity (not just a list)\n Contains at least 2 real quotes\n Design implications are actionable\n Reviewed with 3+ stakeholders\n Cross-checked against analytics data\n Does NOT describe everyone (a good persona excludes people)"
      },
      {
        "title": "Anti-Personas (Who We're NOT Designing For)",
        "body": "anti_persona:\n  name: \"[Label]\"\n  description: \"[Who this is]\"\n  why_excluded: \"[Business reason — too small a segment, wrong market, etc.]\"\n  risk_if_included: \"[What happens to the product if we try to serve them too]\""
      },
      {
        "title": "Journey Map Template",
        "body": "journey_map:\n  title: \"[Persona] — [Goal/Scenario]\"\n  persona: \"[Which persona]\"\n  scenario: \"[Specific situation triggering this journey]\"\n  \n  stages:\n    - stage: \"1. Awareness / Trigger\"\n      duration: \"[Time in this stage]\"\n      goals: \"[What they want to accomplish]\"\n      actions:\n        - \"[Step they take]\"\n        - \"[Step they take]\"\n      touchpoints:\n        - \"[Where they interact — website, app, email, phone, in-person]\"\n      thoughts:\n        - \"\\\"[What they're thinking — from research]\\\"\"\n      emotions:\n        rating: 3  # 1=frustrated, 3=neutral, 5=delighted\n        feeling: \"[Curious but uncertain]\"\n      pain_points:\n        - \"[Problem encountered]\"\n      opportunities:\n        - \"[How we could improve this moment]\"\n    \n    - stage: \"2. Consideration / Research\"\n      # ... same structure\n    \n    - stage: \"3. Decision / Sign-Up\"\n      # ... same structure\n    \n    - stage: \"4. Onboarding / First Use\"\n      # ... same structure\n    \n    - stage: \"5. Regular Use / Value Realization\"\n      # ... same structure\n    \n    - stage: \"6. Expansion / Advocacy (or Churn)\"\n      # ... same structure\n  \n  moments_of_truth:\n    - moment: \"[Critical make-or-break interaction]\"\n      stage: \"[Which stage]\"\n      current_experience: \"[What happens now — score 1-5]\"\n      desired_experience: \"[What should happen — score 1-5]\"\n      gap: \"[Difference = priority]\"\n      \n  service_blueprint_layer:  # Optional — behind-the-scenes\n    - stage: \"[Stage name]\"\n      frontstage: \"[What user sees]\"\n      backstage: \"[What team does]\"\n      support_systems: \"[Tools/processes involved]\"\n      failure_points: \"[Where things break down]\""
      },
      {
        "title": "Emotion Curve Scoring",
        "body": "Plot emotions across the journey:\n\n5 ★ Delighted  ──────────╮          ╭──\n4 ☺ Happy               │          │\n3 😐 Neutral    ──╮      │    ╭─────╯\n2 😟 Frustrated    │      │    │\n1 😤 Angry         ╰──────╯────╯\n                  Stage1  Stage2  Stage3  Stage4  Stage5"
      },
      {
        "title": "Journey Map Quality Rules",
        "body": "Based on research, not assumptions (note data source for each insight)\nOne persona per map (don't average)\nInclude BOTH functional and emotional dimensions\nIdentify \"moments of truth\" — the 2-3 interactions that make or break the experience\nPrioritize opportunities by gap size (desired minus current)\nInclude backstage/blueprint layer for service design"
      },
      {
        "title": "Test Plan Template",
        "body": "usability_test:\n  study_name: \"[Name]\"\n  objective: \"[What design question are we answering?]\"\n  \n  format:\n    type: \"[Moderated / Unmoderated]\"\n    location: \"[Remote / In-person / Lab]\"\n    device: \"[Desktop / Mobile / Tablet / Cross-device]\"\n    duration: \"60 min\"\n    recording: \"[Screen + audio + face camera]\"\n  \n  prototype:\n    fidelity: \"[Paper / Wireframe / Hi-fi / Live product]\"\n    tool: \"[Figma / InVision / Live URL]\"\n    scope: \"[Which flows are testable]\"\n    known_limitations: \"[What won't work in the prototype]\"\n  \n  participants:\n    target: 5-8\n    criteria: \"[From screener — link to Phase 2]\"\n    incentive: \"$75\"\n  \n  tasks:\n    - task_id: \"T1\"\n      scenario: \"You need to [context]. Using this app, [goal].\"\n      success_criteria: \n        - \"[Specific completion definition]\"\n      time_limit: \"5 min\"\n      priority: \"critical\"  # critical / important / nice-to-know\n      metrics:\n        - completion_rate\n        - time_on_task\n        - error_count\n        - satisfaction_rating\n    \n    - task_id: \"T2\"\n      scenario: \"[Next task...]\"\n      # ... same structure\n  \n  post_task_questions:\n    - \"On a scale of 1-7, how easy was that? (SEQ)\"\n    - \"What did you expect to happen when you [action]?\"\n    - \"Was anything confusing?\"\n  \n  post_test_questions:\n    - \"SUS (System Usability Scale) — 10 questions\"\n    - \"What was the easiest part?\"\n    - \"What was the most frustrating part?\"\n    - \"Would you use this? Why/why not?\"\n    - \"What's missing?\""
      },
      {
        "title": "Task Writing Rules",
        "body": "Set the scene — give context, not instructions (\"You want to book a flight to NYC next Friday\" NOT \"Click the search button\")\nDon't use interface words — say \"find\" not \"navigate to,\" say \"purchase\" not \"add to cart and checkout\"\nMake it realistic — use scenarios from actual research data\nOne goal per task — don't combine (\"book a flight AND a hotel\")\nOrder: easy → hard — build confidence before complex tasks"
      },
      {
        "title": "Severity Rating Scale",
        "body": "SeverityLabelDefinitionAction0Not a problemDisagreement among evaluators, no real issueNone1CosmeticNoticed but doesn't affect task completionFix if time allows2MinorCauses hesitation or minor inefficiencySchedule fix3MajorCauses significant difficulty, workarounds neededFix before launch4CatastrophicPrevents task completion entirelyFix immediately"
      },
      {
        "title": "Usability Finding Template",
        "body": "finding:\n  id: \"UF-001\"\n  title: \"[Short descriptive title]\"\n  severity: 3  # 0-4\n  frequency: \"4/5 participants\"\n  task: \"T2\"\n  \n  observation: \"[What happened — factual, behavioral]\"\n  evidence:\n    - participant: \"P01\"\n      behavior: \"[What they did]\"\n      quote: \"\\\"[What they said]\\\"\"\n      timestamp: \"14:22\"\n    - participant: \"P03\"\n      behavior: \"[What they did]\"\n  \n  root_cause: \"[Why this happened — mental model mismatch, visibility, feedback, etc.]\"\n  \n  recommendation:\n    change: \"[Specific design change]\"\n    rationale: \"[Why this will fix it]\"\n    effort: \"[S/M/L]\"\n    impact: \"[High/Medium/Low]\"\n    \n  heuristic_violated: \"[Which Nielsen heuristic, if applicable]\""
      },
      {
        "title": "Nielsen's 10 Heuristics (Quick Reference)",
        "body": "#HeuristicWhat to Check1Visibility of system statusLoading indicators, progress bars, confirmation messages2Match real worldLabels match user language, not internal jargon3User control & freedomUndo, back, cancel, exit are easy to find4Consistency & standardsSame action = same result everywhere5Error preventionConfirmations, constraints, smart defaults6Recognition > recallOptions visible, not memorized7Flexibility & efficiencyShortcuts for experts, simple for novices8Aesthetic & minimalistNo unnecessary information competing for attention9Error recoveryClear error messages with solutions, not codes10Help & documentationSearchable, task-focused, concise"
      },
      {
        "title": "Heuristic Evaluation Scorecard",
        "body": "Rate each heuristic 1-5 per screen/flow:\n\nheuristic_audit:\n  screen: \"[Screen/Flow name]\"\n  evaluator: \"[Name]\"\n  date: \"YYYY-MM-DD\"\n  \n  scores:\n    visibility_of_status: 4\n    real_world_match: 3\n    user_control: 2\n    consistency: 4\n    error_prevention: 3\n    recognition_over_recall: 4\n    flexibility_efficiency: 2\n    aesthetic_minimal: 3\n    error_recovery: 1\n    help_documentation: 2\n  \n  total: 28  # out of 50\n  grade: \"C\"  # A=45+, B=38+, C=28+, D=20+, F=<20\n  \n  critical_issues:\n    - heuristic: \"Error recovery\"\n      location: \"[Where]\"\n      issue: \"[What's wrong]\"\n      fix: \"[Recommendation]\""
      },
      {
        "title": "Affinity Mapping Process",
        "body": "Extract: Pull every observation, quote, behavior onto individual notes\nCluster: Group similar notes (bottom-up, not top-down)\nLabel: Name each cluster with a theme (use participant language)\nHierarchy: Group clusters into meta-themes\nPrioritize: Rank by frequency × impact"
      },
      {
        "title": "Theme Template",
        "body": "theme:\n  name: \"[Theme label — use participant language]\"\n  description: \"[2-3 sentence summary]\"\n  \n  evidence:\n    participant_count: \"8/12 participants\"\n    segments_affected: [\"Persona A\", \"Persona B\"]\n    \n    quotes:\n      - participant: \"P03\"\n        quote: \"\\\"[Exact quote]\\\"\"\n      - participant: \"P07\"\n        quote: \"\\\"[Exact quote]\\\"\"\n    \n    behaviors_observed:\n      - \"[What they did]\"\n      - \"[Pattern across participants]\"\n    \n    data_points:\n      - \"[Any quantitative support — survey %, analytics, etc.]\"\n  \n  impact:\n    on_users: \"[How this affects their experience]\"\n    on_business: \"[Revenue, retention, acquisition, support cost impact]\"\n    severity: \"High\"  # High / Medium / Low\n  \n  insight: \"[The 'so what' — what does this mean for design?]\"\n  \n  recommendations:\n    - recommendation: \"[Specific, actionable change]\"\n      effort: \"M\"\n      impact: \"High\"\n      confidence: \"High\"  # based on evidence strength"
      },
      {
        "title": "Insight Formula",
        "body": "Every insight must follow: Observation + Evidence + So What + Now What\n\n\"Users consistently [OBSERVATION] — seen in [X/Y participants, with supporting quotes]. This matters because [SO WHAT — impact on goals/business]. We should [NOW WHAT — specific recommendation].\"\n\nBad insight: \"Users found the navigation confusing\"\nGood insight: \"7 of 12 participants couldn't find the settings page within 30 seconds. 4 looked in the profile menu, 2 used search, 1 gave up. This maps to 15% of support tickets ('How do I change my password'). Moving settings to the top-level nav and adding a search shortcut would reduce discovery time and cut related support volume.\""
      },
      {
        "title": "Research Scoring Rubric (0-100)",
        "body": "DimensionWeightCriteriaMethodology Rigor20%Right method for question, adequate sample, proper recruitingData Quality15%Rich observations, real quotes, behavioral evidenceAnalysis Depth20%Beyond surface themes, root causes identified, patterns across segmentsInsight Actionability25%Specific recommendations, effort/impact rated, prioritizedPresentation Clarity10%Stakeholders can understand and act without explanationBusiness Connection10%Findings connected to business metrics and goals\n\nScoring:\n\n90-100: Publication-quality research\n75-89: Strong actionable research\n60-74: Adequate — some gaps in methodology or analysis\n40-59: Weak — findings are surface-level or poorly supported\nBelow 40: Redo — methodology flaws undermine findings"
      },
      {
        "title": "Executive Summary Template",
        "body": "# [Study Name] — Research Report\n\n## TL;DR (3 bullet max)\n- [Most important finding + recommendation]\n- [Second most important finding + recommendation]  \n- [Third most important finding + recommendation]\n\n## Study Overview\n- **Method:** [e.g., 12 semi-structured interviews + 5 usability tests]\n- **Participants:** [e.g., 12 mid-market SaaS PMs, 2-8 years experience]\n- **Duration:** [e.g., 3 weeks, Jan 5-26 2026]\n- **Confidence:** [High / Medium / Low — based on sample + methodology]\n\n## Key Findings\n\n### Finding 1: [Title] ⚠️ [Severity: Critical/High/Medium/Low]\n**What we found:** [2-3 sentences with evidence]\n**Why it matters:** [Business impact]\n**Recommendation:** [Specific action]\n**Effort:** [S/M/L] | **Impact:** [High/Med/Low]\n\n### Finding 2: [Title]\n...\n\n## Personas Updated\n[Link to updated persona YAML files]\n\n## Journey Map\n[Link to journey map]\n\n## Design Recommendations (Prioritized)\n\n| # | Recommendation | Finding | Effort | Impact | Priority |\n|---|---------------|---------|--------|--------|----------|\n| 1 | [Action] | F1 | S | High | P0 — Do now |\n| 2 | [Action] | F3 | M | High | P1 — Next sprint |\n| 3 | [Action] | F2 | L | Medium | P2 — Backlog |\n\n## What We Still Don't Know\n- [Open questions for future research]\n- [Hypotheses to validate]\n\n## Appendix\n- Screener criteria\n- Interview guide\n- Raw data location\n- Participant demographics"
      },
      {
        "title": "Design Critique Framework (CAMPS)",
        "body": "DimensionQuestions to AskClarityCan users understand what this is and what to do within 5 seconds?AlignmentDoes this solve the problem identified in research? For the right persona?Mental ModelDoes it match how users think about this task? (from interview data)PriorityDoes the visual hierarchy match user task priority?SimplicityCan anything be removed without losing function?"
      },
      {
        "title": "Prototype Review Checklist",
        "body": "design_review:\n  screen: \"[Screen name]\"\n  reviewer: \"[Name]\"\n  date: \"YYYY-MM-DD\"\n  \n  research_alignment:\n    - check: \"Addresses top pain point from research\"\n      status: \"✅ / ❌ / ⚠️\"\n      notes: \"[Which finding this addresses]\"\n    - check: \"Uses language from user interviews (not internal jargon)\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Matches mental model revealed in research\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Works for primary persona AND doesn't break for secondary\"\n      status: \"✅ / ❌ / ⚠️\"\n  \n  usability:\n    - check: \"Primary action is visually dominant\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Error states designed and messaged\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Empty states designed (first use, no data, no results)\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Loading states designed\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Edge cases handled (long text, missing data, permissions)\"\n      status: \"✅ / ❌ / ⚠️\"\n  \n  accessibility:\n    - check: \"Color contrast meets WCAG AA (4.5:1 text, 3:1 UI)\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Touch targets ≥44px\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Information not conveyed by color alone\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Logical reading/tab order\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Alt text for meaningful images\"\n      status: \"✅ / ❌ / ⚠️\"\n  \n  overall_score: \"[1-5]\"\n  ship_decision: \"Ready / Needs changes / Needs testing / Needs research\""
      },
      {
        "title": "Research Repository Structure",
        "body": "research/\n├── YYYY/\n│   ├── Q1/\n│   │   ├── [study-name]/\n│   │   │   ├── plan.yaml          # Research brief\n│   │   │   ├── screener.yaml      # Recruiting criteria\n│   │   │   ├── guide.md           # Interview/test guide\n│   │   │   ├── notes/             # Per-participant notes\n│   │   │   │   ├── P01.yaml\n│   │   │   │   └── P02.yaml\n│   │   │   ├── synthesis/         # Themes, affinity maps\n│   │   │   ├── personas/          # Updated personas\n│   │   │   ├── journey-maps/      # Updated maps\n│   │   │   ├── report.md          # Final report\n│   │   │   └── recordings/        # Session recordings (link)\n│   │   └── [next-study]/\n│   └── Q2/\n├── personas/                      # Master persona library\n│   ├── persona-a.yaml\n│   └── persona-b.yaml\n├── journey-maps/                  # Master journey maps\n├── insights-database.yaml         # Cross-study insight tracker\n└── research-calendar.yaml         # Planned studies"
      },
      {
        "title": "Cross-Study Insight Tracker",
        "body": "insights_database:\n  - insight_id: \"INS-001\"\n    theme: \"[Category]\"\n    insight: \"[The insight]\"\n    first_found: \"2026-01-15\"\n    studies: [\"Study A\", \"Study C\", \"Study F\"]\n    evidence_strength: \"Strong\"  # 3+ studies\n    status: \"Addressed\"  # Open / In Progress / Addressed / Won't Fix\n    design_response: \"[What was done]\"\n    impact_measured: \"[Before/after metric if available]\""
      },
      {
        "title": "Research Impact Tracking",
        "body": "Metric\tHow to Measure\tTarget\nFindings → shipped features\t% of recommendations implemented within 2 quarters\t>60%\nPre/post usability scores\tSUS score before vs after changes\t+10 points\nSupport ticket reduction\tRelated ticket volume after design change\t-25%\nTask completion rate\tUsability test success rate over time\t>85%\nTime on task\tAverage task time trend\tDecreasing\nStakeholder confidence\tPost-study survey: \"How useful was this?\"\t>4/5"
      },
      {
        "title": "Quick Commands",
        "body": "Command\tWhat It Does\n\"Plan a research study for [topic]\"\tGenerate research brief YAML\n\"Build a screener for [audience]\"\tGenerate screening questionnaire\n\"Create interview guide for [topic]\"\tGenerate interview questions and structure\n\"Build persona from [data/notes]\"\tSynthesize data into persona YAML\n\"Map the journey for [persona + goal]\"\tGenerate journey map\n\"Plan usability test for [prototype]\"\tGenerate test plan with tasks\n\"Run heuristic evaluation of [screen/flow]\"\tScore against Nielsen's 10\n\"Synthesize findings from [study]\"\tGenerate themes and insights\n\"Write research report for [study]\"\tGenerate executive summary and recommendations\n\"Score this research [report/study]\"\tEvaluate against quality rubric\n\"Review this design against research\"\tCAMPS critique + alignment check\n\"Set up research repository\"\tCreate folder structure and templates"
      },
      {
        "title": "Small Budget / No Recruiting Budget",
        "body": "Guerrilla testing: coffee shop intercepts (5 min tests, buy them a coffee)\nInternal users: use colleagues from different departments (not product/design team)\nSocial media: post in relevant communities for volunteers\nExisting users: email opt-in for research panel"
      },
      {
        "title": "Remote-Only Research",
        "body": "Video call with screen share (Zoom, Google Meet)\nAsync: Loom recordings of tasks + written responses\nUnmoderated: UserTesting.com, Maze, Lookback\nDiary studies: use messaging apps (WhatsApp, Telegram) for daily check-ins"
      },
      {
        "title": "Stakeholder Pushback (\"We don't have time for research\")",
        "body": "\"5 users, 1 week, 3 critical findings\" — the minimum viable study\nPair research with existing touchpoints (support calls, sales demos)\nFrame as risk reduction: \"Would you rather discover this before or after launch?\"\nShow past research ROI (support ticket reduction, conversion improvement)"
      },
      {
        "title": "Conflicting Findings",
        "body": "Check sample composition — different segments may have different needs\nPrioritize by business impact: which segment is more valuable?\nRun a survey to quantify: \"60% prefer A, 40% prefer B\"\nConsider designing for both (progressive disclosure, personalization)"
      },
      {
        "title": "International / Cross-Cultural Research",
        "body": "Don't just translate — localize scenarios and contexts\nAccount for cultural response bias (e.g., reluctance to criticize in some cultures)\nUse local moderators when possible\nAdjust incentives to local norms\nWatch for design patterns that don't transfer (icons, colors, reading direction)"
      },
      {
        "title": "Accessibility Research",
        "body": "Recruit participants with disabilities (screen reader users, motor impairments, cognitive differences)\nTest with actual assistive technology, not simulation\nInclude in regular studies (at least 1 participant with accessibility needs per study)\nWCAG compliance testing is NOT a substitute for research with disabled users\n\nBuilt by AfrexAI — Autonomous Intelligence for Business"
      }
    ],
    "body": "UX Research Engine ⚡\n\nComplete UX research methodology — from discovery to validated design decisions. No scripts, no APIs, no dependencies. Pure agent skill.\n\nPhase 1: Research Planning\nResearch Brief YAML\nproject: \"[Product/Feature Name]\"\nresearch_question: \"[What do we need to learn?]\"\nbusiness_context:\n  objective: \"[Business goal this research supports]\"\n  decision: \"[What decision will this research inform?]\"\n  stakeholders: [\"PM\", \"Design Lead\", \"Engineering\"]\n  deadline: \"YYYY-MM-DD\"\nscope:\n  product_area: \"[Feature/flow being studied]\"\n  user_segment: \"[Who are we studying?]\"\n  geographic: \"[Regions/markets]\"\nmethodology: \"[See selection matrix below]\"\nsample_size: \"[See calculator below]\"\ntimeline:\n  planning: \"Week 1\"\n  recruiting: \"Week 1-2\"\n  fieldwork: \"Week 2-3\"\n  analysis: \"Week 3-4\"\n  reporting: \"Week 4\"\nbudget:\n  participant_incentives: \"$X\"\n  tools: \"$X\"\n  total: \"$X\"\nsuccess_criteria:\n  - \"[Specific insight we need]\"\n  - \"[Confidence level required]\"\n  - \"[Actionable output format]\"\n\nMethod Selection Matrix\nMethod\tBest For\tSample Size\tTime\tCost\tConfidence\nUser Interviews\tDeep \"why\" understanding, exploring unknowns\t5-15\t2-4 weeks\t$$\tHigh (qualitative)\nUsability Testing\tFinding interaction problems, validating flows\t5-8 per round\t1-2 weeks\t$$\tHigh (behavioral)\nSurveys\tQuantifying attitudes, measuring satisfaction\t100-400+\t1-2 weeks\t$\tHigh (statistical)\nCard Sorting\tInformation architecture, navigation labels\t15-30 (open), 30+ (closed)\t1 week\t$\tMedium\nDiary Studies\tLong-term behavior, context of use\t10-15\t2-6 weeks\t$$$\tHigh (longitudinal)\nA/B Testing\tComparing specific design variants\t1000+ per variant\t1-4 weeks\t$\tVery High\nContextual Inquiry\tUnderstanding real environment, workflows\t4-8\t2-3 weeks\t$$$\tVery High\nTree Testing\tValidating IA without visual design\t50+\t1 week\t$\tHigh\nFirst-Click 
Testing\tNavigation effectiveness\t30-50\t1 week\t$\tMedium\nConcept Testing\tEarly-stage idea validation\t8-15\t1-2 weeks\t$$\tMedium\nHeuristic Evaluation\tExpert review of existing UI\t3-5 evaluators\t2-3 days\t$\tMedium\nCompetitive UX Audit\tUnderstanding market standards\tN/A\t1 week\t$\tLow-Medium\nDecision Tree: Which Method?\nDo you know WHAT the problem is?\n├── NO → Generative Research\n│   ├── Need context? → Contextual Inquiry\n│   ├── Need attitudes? → User Interviews\n│   ├── Need behaviors over time? → Diary Study\n│   └── Need broad patterns? → Survey (exploratory)\n│\n└── YES → Evaluative Research\n    ├── Have a prototype/product?\n    │   ├── YES → Usability Testing\n    │   │   ├── Early concept → Concept Test (paper/low-fi)\n    │   │   ├── Key flow → Task-based Usability Test\n    │   │   └── Comparing options → A/B Test\n    │   └── NO → \n    │       ├── Testing IA → Card Sort / Tree Test\n    │       └── Testing content → First-Click Test\n    └── Need expert opinion fast? 
→ Heuristic Evaluation\n\nSample Size Calculator\n\nQualitative (interviews, usability):\n\n5 users find ~85% of usability issues (Nielsen)\n8-12 for thematic saturation in interviews\n15+ for diverse populations or complex domains\nRule: keep going until you hear the same things 3x\n\nQuantitative (surveys):\n\nPopulation\t90% Confidence ±5%\t95% Confidence ±5%\t99% Confidence ±5%\n100\t74\t80\t87\n500\t176\t217\t285\n1,000\t214\t278\t399\n10,000\t264\t370\t622\n100,000+\t271\t384\t660\n\nA/B Tests:\n\nMDE (Minimum Detectable Effect) drives sample size\n5% MDE, 80% power, 95% confidence → ~1,600 per variant\n2% MDE → ~10,000 per variant\nAlways run for full business cycles (min 1 week)\nPhase 2: Participant Recruiting\nScreener Template\nscreener:\n  title: \"[Study Name] Participant Screener\"\n  target_profile:\n    demographics:\n      age_range: \"[e.g., 25-45]\"\n      location: \"[e.g., US-based]\"\n      language: \"[e.g., English-fluent]\"\n    behavioral:\n      product_usage: \"[e.g., Uses [product] 3+ times/week]\"\n      experience_level: \"[e.g., 1+ year with similar tools]\"\n      recent_activity: \"[e.g., Made a purchase in last 30 days]\"\n    psychographic:\n      decision_maker: \"[e.g., Primary household purchaser]\"\n      tech_comfort: \"[e.g., Comfortable with mobile apps]\"\n  \n  screening_questions:\n    - question: \"How often do you use [product category]?\"\n      type: \"single-select\"\n      options: [\"Daily\", \"Weekly\", \"Monthly\", \"Rarely\", \"Never\"]\n      qualify: [\"Daily\", \"Weekly\"]\n      disqualify: [\"Never\"]\n    \n    - question: \"Which of these tools do you currently use?\"\n      type: \"multi-select\"\n      options: [\"Tool A\", \"Tool B\", \"Tool C\", \"None\"]\n      qualify_min: 1\n      \n    - question: \"What is your primary role?\"\n      type: \"single-select\"\n      options: [\"Developer\", \"Designer\", \"PM\", \"Marketing\", \"Other\"]\n      qualify: [\"Developer\", \"Designer\", \"PM\"]\n    
\n    - question: \"Have you participated in a UX study in the last 6 months?\"\n      type: \"single-select\"\n      options: [\"Yes\", \"No\"]\n      disqualify: [\"Yes\"]  # Avoid professional participants\n  \n  anti-patterns:\n    - \"Works at a competitor or in UX research\"\n    - \"Family/friends of team members\"\n    - \"Participated in study for this product before\"\n  \n  incentive: \"$75 for 60-min session\"\n  \n  recruiting_channels:\n    - channel: \"Existing user database\"\n      quality: \"★★★★★\"\n      cost: \"Free\"\n    - channel: \"UserTesting.com / UserInterviews.com\"\n      quality: \"★★★★\"\n      cost: \"$50-150/participant\"\n    - channel: \"Social media recruitment\"\n      quality: \"★★★\"\n      cost: \"Free-$$\"\n    - channel: \"Craigslist / local posting\"\n      quality: \"★★\"\n      cost: \"$\"\n\nRecruiting Quality Checklist\n Screener doesn't lead (no \"right\" answers obvious)\n Mix of demographics within target segment\n No more than 20% from single recruiting source\n At least 1 \"edge case\" participant (power user, new user, accessibility needs)\n Over-recruit by 20% for no-shows\n Consent form prepared and sent in advance\n Incentive delivery method confirmed\nPhase 3: User Interviews\nInterview Guide Template\n# Interview Guide: [Study Name]\nDuration: 60 minutes\nModerator: [Name]\n\n## Setup (5 min)\n- Thank participant, confirm recording consent\n- \"There are no right or wrong answers — we're learning from YOUR experience\"\n- \"Feel free to be critical — honest feedback helps us improve\"\n- \"I didn't design this, so you won't hurt my feelings\"\n\n## Warm-Up (5 min)\n- \"Tell me about your role and what a typical day looks like\"\n- \"How does [product area] fit into your work?\"\n\n## Core Questions (35 min)\n\n### Context & Current Behavior\n1. 
\"Walk me through the last time you [did the task we're studying]\"\n   - Probe: \"What happened next?\"\n   - Probe: \"How did that make you feel?\"\n   - Probe: \"What would you have preferred to happen?\"\n\n2. \"What tools/methods do you currently use for [task]?\"\n   - Probe: \"What do you like about that approach?\"\n   - Probe: \"What frustrates you?\"\n   - Probe: \"How long have you been doing it this way?\"\n\n3. \"Can you show me how you typically [task]?\" (if remote: screen share)\n\n### Pain Points & Needs\n4. \"What's the hardest part about [task]?\"\n   - Probe: \"How often does that happen?\"\n   - Probe: \"What do you do when that happens?\"\n   - Probe: \"How much time/money does that cost you?\"\n\n5. \"If you could wave a magic wand and change one thing about [experience], what would it be?\"\n\n6. \"Tell me about a time when [process] went really wrong\"\n   - Probe: \"What was the impact?\"\n   - Probe: \"How was it resolved?\"\n\n### Mental Models\n7. \"How would you explain [concept] to a colleague?\"\n8. \"What do you expect to happen when you [action]?\"\n9. \"Where would you look for [information/feature]?\"\n\n### Priorities & Trade-offs\n10. \"If you had to choose between [speed vs accuracy / ease vs power], which matters more? Why?\"\n\n## Concept Reaction (10 min) — if applicable\n- Show prototype/concept\n- \"What's your first impression?\"\n- \"What would you use this for?\"\n- \"What's missing?\"\n- \"Would this replace what you currently use? 
Why/why not?\"\n\n## Wrap-Up (5 min)\n- \"Is there anything else about [topic] we should know?\"\n- \"Who else should we talk to about this?\"\n- Thank participant, confirm incentive delivery\n\nInterview Quality Rules\n80/20 rule: Participant talks 80%, you talk 20%\nNever ask \"Would you use this?\" — people can't predict future behavior\nAsk about past behavior, not hypothetical futures\nFollow the energy — when they get animated, dig deeper\nSilence is a tool — pause 5 seconds after they answer; they'll elaborate\n\"Tell me more about that\" — your most powerful phrase\nWatch for say/do gaps — note when claimed behavior contradicts observed behavior\nRecord everything — audio minimum, video ideal, notes always\nNote-Taking Template (Per Interview)\nparticipant:\n  id: \"P01\"\n  date: \"YYYY-MM-DD\"\n  demographics: \"[age, role, experience level]\"\n  session_duration: \"58 min\"\n\nkey_quotes:\n  - quote: \"[Exact words]\"\n    timestamp: \"12:34\"\n    context: \"[What prompted this]\"\n    theme: \"[Emerging theme tag]\"\n\nobservations:\n  behaviors:\n    - \"[What they DID, not what they said]\"\n  emotions:\n    - \"[Frustration when..., delight when..., confusion at...]\"\n  workarounds:\n    - \"[Creative solutions they've built]\"\n\npain_points:\n  - pain: \"[Specific problem]\"\n    severity: \"[1-5]\"\n    frequency: \"[daily/weekly/monthly/rarely]\"\n    current_solution: \"[How they cope]\"\n    \nneeds:\n  - need: \"[Unmet need identified]\"\n    type: \"[functional/emotional/social]\"\n    evidence: \"[Quote or behavior that reveals this]\"\n\nsurprises:\n  - \"[Anything unexpected — these are gold]\"\n\nmoderator_notes:\n  - \"[Post-session reflection, what to adjust for next interview]\"\n\nPhase 4: Persona Building\nData-Driven Persona Template\npersona:\n  name: \"[Realistic name — not cutesy]\"\n  photo: \"[Representative stock photo description]\"\n  archetype: \"[1-3 word label, e.g., 'The Overwhelmed Manager']\"\n  \n  demographics:\n   
 age: \"[Range or specific]\"\n    role: \"[Job title / life stage]\"\n    experience: \"[Years with product/domain]\"\n    tech_proficiency: \"[Novice / Intermediate / Advanced / Expert]\"\n    environment: \"[Office / remote / mobile / field]\"\n  \n  # MOST IMPORTANT SECTION\n  goals:\n    primary: \"[The #1 thing they're trying to accomplish]\"\n    secondary:\n      - \"[Supporting goal]\"\n      - \"[Supporting goal]\"\n    underlying: \"[The emotional/social need behind the functional goal]\"\n  \n  frustrations:\n    - frustration: \"[Specific pain point]\"\n      frequency: \"[How often — from research data]\"\n      severity: \"[1-5]\"\n      current_workaround: \"[What they do today]\"\n      evidence: \"[P03, P07, P11 mentioned this]\"\n  \n  behaviors:\n    usage_pattern: \"[When, where, how often they engage]\"\n    decision_process: \"[How they evaluate options]\"\n    information_sources: \"[Where they learn / get help]\"\n    social_influence: \"[Who influences their decisions]\"\n    key_workflows:\n      - \"[Task 1 — frequency — duration]\"\n      - \"[Task 2 — frequency — duration]\"\n  \n  mental_models:\n    - \"[How they think about [concept] — often surprising]\"\n    - \"[Vocabulary they use — not our jargon]\"\n  \n  motivations:\n    gains: \"[What success looks like to them]\"\n    fears: \"[What failure looks like]\"\n    triggers: \"[What prompts them to act]\"\n    barriers: \"[What stops them from acting]\"\n  \n  quotes:\n    - \"\\\"[Real quote from research that captures this persona]\\\"\"\n    - \"\\\"[Another revealing quote]\\\"\"\n  \n  design_implications:\n    must_have:\n      - \"[Feature/quality this persona absolutely needs]\"\n    should_have:\n      - \"[Important but not dealbreaker]\"\n    must_avoid:\n      - \"[Things that will drive this persona away]\"\n    communication_style: \"[How to talk to this persona]\"\n  \n  data_sources:\n    interviews: \"[# of participants who map to this persona]\"\n    
survey_segment: \"[% of survey respondents]\"\n    analytics_cohort: \"[Behavioral data that identifies this group]\"\n\nPersona Validation Checklist\n Based on real research data, not assumptions\n Represents a meaningful segment (not 1 outlier)\n Goals are specific enough to design for\n Frustrations include frequency + severity (not just a list)\n Contains at least 2 real quotes\n Design implications are actionable\n Reviewed with 3+ stakeholders\n Cross-checked against analytics data\n Does NOT describe everyone (a good persona excludes people)\nAnti-Personas (Who We're NOT Designing For)\nanti_persona:\n  name: \"[Label]\"\n  description: \"[Who this is]\"\n  why_excluded: \"[Business reason — too small a segment, wrong market, etc.]\"\n  risk_if_included: \"[What happens to the product if we try to serve them too]\"\n\nPhase 5: Journey Mapping\nJourney Map Template\njourney_map:\n  title: \"[Persona] — [Goal/Scenario]\"\n  persona: \"[Which persona]\"\n  scenario: \"[Specific situation triggering this journey]\"\n  \n  stages:\n    - stage: \"1. Awareness / Trigger\"\n      duration: \"[Time in this stage]\"\n      goals: \"[What they want to accomplish]\"\n      actions:\n        - \"[Step they take]\"\n        - \"[Step they take]\"\n      touchpoints:\n        - \"[Where they interact — website, app, email, phone, in-person]\"\n      thoughts:\n        - \"\\\"[What they're thinking — from research]\\\"\"\n      emotions:\n        rating: 3  # 1=frustrated, 3=neutral, 5=delighted\n        feeling: \"[Curious but uncertain]\"\n      pain_points:\n        - \"[Problem encountered]\"\n      opportunities:\n        - \"[How we could improve this moment]\"\n    \n    - stage: \"2. Consideration / Research\"\n      # ... same structure\n    \n    - stage: \"3. Decision / Sign-Up\"\n      # ... same structure\n    \n    - stage: \"4. Onboarding / First Use\"\n      # ... same structure\n    \n    - stage: \"5. Regular Use / Value Realization\"\n      # ... 
same structure\n    \n    - stage: \"6. Expansion / Advocacy (or Churn)\"\n      # ... same structure\n  \n  moments_of_truth:\n    - moment: \"[Critical make-or-break interaction]\"\n      stage: \"[Which stage]\"\n      current_experience: \"[What happens now — score 1-5]\"\n      desired_experience: \"[What should happen — score 1-5]\"\n      gap: \"[Difference = priority]\"\n      \n  service_blueprint_layer:  # Optional — behind-the-scenes\n    - stage: \"[Stage name]\"\n      frontstage: \"[What user sees]\"\n      backstage: \"[What team does]\"\n      support_systems: \"[Tools/processes involved]\"\n      failure_points: \"[Where things break down]\"\n\nEmotion Curve Scoring\n\nPlot emotions across the journey:\n\n5 ★ Delighted  ──────────╮          ╭──\n4 ☺ Happy               │          │\n3 😐 Neutral    ──╮      │    ╭─────╯\n2 😟 Frustrated    │      │    │\n1 😤 Angry         ╰──────╯────╯\n                  Stage1  Stage2  Stage3  Stage4  Stage5\n\nJourney Map Quality Rules\nBased on research, not assumptions (note data source for each insight)\nOne persona per map (don't average)\nInclude BOTH functional and emotional dimensions\nIdentify \"moments of truth\" — the 2-3 interactions that make or break the experience\nPrioritize opportunities by gap size (desired minus current)\nInclude backstage/blueprint layer for service design\nPhase 6: Usability Testing\nTest Plan Template\nusability_test:\n  study_name: \"[Name]\"\n  objective: \"[What design question are we answering?]\"\n  \n  format:\n    type: \"[Moderated / Unmoderated]\"\n    location: \"[Remote / In-person / Lab]\"\n    device: \"[Desktop / Mobile / Tablet / Cross-device]\"\n    duration: \"60 min\"\n    recording: \"[Screen + audio + face camera]\"\n  \n  prototype:\n    fidelity: \"[Paper / Wireframe / Hi-fi / Live product]\"\n    tool: \"[Figma / InVision / Live URL]\"\n    scope: \"[Which flows are testable]\"\n    known_limitations: \"[What won't work in the prototype]\"\n  \n  
participants:\n    target: 5-8\n    criteria: \"[From screener — link to Phase 2]\"\n    incentive: \"$75\"\n  \n  tasks:\n    - task_id: \"T1\"\n      scenario: \"You need to [context]. Using this app, [goal].\"\n      success_criteria: \n        - \"[Specific completion definition]\"\n      time_limit: \"5 min\"\n      priority: \"critical\"  # critical / important / nice-to-know\n      metrics:\n        - completion_rate\n        - time_on_task\n        - error_count\n        - satisfaction_rating\n    \n    - task_id: \"T2\"\n      scenario: \"[Next task...]\"\n      # ... same structure\n  \n  post_task_questions:\n    - \"On a scale of 1-7, how easy was that? (SEQ)\"\n    - \"What did you expect to happen when you [action]?\"\n    - \"Was anything confusing?\"\n  \n  post_test_questions:\n    - \"SUS (System Usability Scale) — 10 questions\"\n    - \"What was the easiest part?\"\n    - \"What was the most frustrating part?\"\n    - \"Would you use this? Why/why not?\"\n    - \"What's missing?\"\n\nTask Writing Rules\nSet the scene — give context, not instructions (\"You want to book a flight to NYC next Friday\" NOT \"Click the search button\")\nDon't use interface words — say \"find\" not \"navigate to,\" say \"purchase\" not \"add to cart and checkout\"\nMake it realistic — use scenarios from actual research data\nOne goal per task — don't combine (\"book a flight AND a hotel\")\nOrder: easy → hard — build confidence before complex tasks\nSeverity Rating Scale\nSeverity\tLabel\tDefinition\tAction\n0\tNot a problem\tDisagreement among evaluators, no real issue\tNone\n1\tCosmetic\tNoticed but doesn't affect task completion\tFix if time allows\n2\tMinor\tCauses hesitation or minor inefficiency\tSchedule fix\n3\tMajor\tCauses significant difficulty, workarounds needed\tFix before launch\n4\tCatastrophic\tPrevents task completion entirely\tFix immediately\nUsability Finding Template\nfinding:\n  id: \"UF-001\"\n  title: \"[Short descriptive title]\"\n  severity: 
3  # 0-4\n  frequency: \"4/5 participants\"\n  task: \"T2\"\n  \n  observation: \"[What happened — factual, behavioral]\"\n  evidence:\n    - participant: \"P01\"\n      behavior: \"[What they did]\"\n      quote: \"\\\"[What they said]\\\"\"\n      timestamp: \"14:22\"\n    - participant: \"P03\"\n      behavior: \"[What they did]\"\n  \n  root_cause: \"[Why this happened — mental model mismatch, visibility, feedback, etc.]\"\n  \n  recommendation:\n    change: \"[Specific design change]\"\n    rationale: \"[Why this will fix it]\"\n    effort: \"[S/M/L]\"\n    impact: \"[High/Medium/Low]\"\n    \n  heuristic_violated: \"[Which Nielsen heuristic, if applicable]\"\n\nNielsen's 10 Heuristics (Quick Reference)\n#\tHeuristic\tWhat to Check\n1\tVisibility of system status\tLoading indicators, progress bars, confirmation messages\n2\tMatch real world\tLabels match user language, not internal jargon\n3\tUser control & freedom\tUndo, back, cancel, exit are easy to find\n4\tConsistency & standards\tSame action = same result everywhere\n5\tError prevention\tConfirmations, constraints, smart defaults\n6\tRecognition > recall\tOptions visible, not memorized\n7\tFlexibility & efficiency\tShortcuts for experts, simple for novices\n8\tAesthetic & minimalist\tNo unnecessary information competing for attention\n9\tError recovery\tClear error messages with solutions, not codes\n10\tHelp & documentation\tSearchable, task-focused, concise\nHeuristic Evaluation Scorecard\n\nRate each heuristic 1-5 per screen/flow:\n\nheuristic_audit:\n  screen: \"[Screen/Flow name]\"\n  evaluator: \"[Name]\"\n  date: \"YYYY-MM-DD\"\n  \n  scores:\n    visibility_of_status: 4\n    real_world_match: 3\n    user_control: 2\n    consistency: 4\n    error_prevention: 3\n    recognition_over_recall: 4\n    flexibility_efficiency: 2\n    aesthetic_minimal: 3\n    error_recovery: 1\n    help_documentation: 2\n  \n  total: 28  # out of 50\n  grade: \"C\"  # A=45+, B=38+, C=28+, D=20+, F=<20\n  \n  
critical_issues:\n    - heuristic: \"Error recovery\"\n      location: \"[Where]\"\n      issue: \"[What's wrong]\"\n      fix: \"[Recommendation]\"\n\nPhase 7: Research Synthesis\nAffinity Mapping Process\nExtract: Pull every observation, quote, behavior onto individual notes\nCluster: Group similar notes (bottom-up, not top-down)\nLabel: Name each cluster with a theme (use participant language)\nHierarchy: Group clusters into meta-themes\nPrioritize: Rank by frequency × impact\nTheme Template\ntheme:\n  name: \"[Theme label — use participant language]\"\n  description: \"[2-3 sentence summary]\"\n  \n  evidence:\n    participant_count: \"8/12 participants\"\n    segments_affected: [\"Persona A\", \"Persona B\"]\n    \n    quotes:\n      - participant: \"P03\"\n        quote: \"\\\"[Exact quote]\\\"\"\n      - participant: \"P07\"\n        quote: \"\\\"[Exact quote]\\\"\"\n    \n    behaviors_observed:\n      - \"[What they did]\"\n      - \"[Pattern across participants]\"\n    \n    data_points:\n      - \"[Any quantitative support — survey %, analytics, etc.]\"\n  \n  impact:\n    on_users: \"[How this affects their experience]\"\n    on_business: \"[Revenue, retention, acquisition, support cost impact]\"\n    severity: \"High\"  # High / Medium / Low\n  \n  insight: \"[The 'so what' — what does this mean for design?]\"\n  \n  recommendations:\n    - recommendation: \"[Specific, actionable change]\"\n      effort: \"M\"\n      impact: \"High\"\n      confidence: \"High\"  # based on evidence strength\n\nInsight Formula\n\nEvery insight must follow: Observation + Evidence + So What + Now What\n\n\"Users consistently [OBSERVATION] — seen in [X/Y participants, with supporting quotes]. This matters because [SO WHAT — impact on goals/business]. We should [NOW WHAT — specific recommendation].\"\n\nBad insight: \"Users found the navigation confusing\" Good insight: \"7 of 12 participants couldn't find the settings page within 30 seconds. 
4 looked in the profile menu, 2 used search, 1 gave up. This maps to 15% of support tickets ('How do I change my password'). Moving settings to the top-level nav and adding a search shortcut would reduce discovery time and cut related support volume.\"\n\nResearch Scoring Rubric (0-100)\nDimension\tWeight\tCriteria\nMethodology Rigor\t20%\tRight method for question, adequate sample, proper recruiting\nData Quality\t15%\tRich observations, real quotes, behavioral evidence\nAnalysis Depth\t20%\tBeyond surface themes, root causes identified, patterns across segments\nInsight Actionability\t25%\tSpecific recommendations, effort/impact rated, prioritized\nPresentation Clarity\t10%\tStakeholders can understand and act without explanation\nBusiness Connection\t10%\tFindings connected to business metrics and goals\n\nScoring:\n\n90-100: Publication-quality research\n75-89: Strong actionable research\n60-74: Adequate — some gaps in methodology or analysis\n40-59: Weak — findings are surface-level or poorly supported\nBelow 40: Redo — methodology flaws undermine findings\nPhase 8: Research Report\nExecutive Summary Template\n# [Study Name] — Research Report\n\n## TL;DR (3 bullet max)\n- [Most important finding + recommendation]\n- [Second most important finding + recommendation]  \n- [Third most important finding + recommendation]\n\n## Study Overview\n- **Method:** [e.g., 12 semi-structured interviews + 5 usability tests]\n- **Participants:** [e.g., 12 mid-market SaaS PMs, 2-8 years experience]\n- **Duration:** [e.g., 3 weeks, Jan 5-26 2026]\n- **Confidence:** [High / Medium / Low — based on sample + methodology]\n\n## Key Findings\n\n### Finding 1: [Title] ⚠️ [Severity: Critical/High/Medium/Low]\n**What we found:** [2-3 sentences with evidence]\n**Why it matters:** [Business impact]\n**Recommendation:** [Specific action]\n**Effort:** [S/M/L] | **Impact:** [High/Med/Low]\n\n### Finding 2: [Title]\n...\n\n## Personas Updated\n[Link to updated persona YAML files]\n\n## 
Journey Map\n[Link to journey map]\n\n## Design Recommendations (Prioritized)\n\n| # | Recommendation | Finding | Effort | Impact | Priority |\n|---|---------------|---------|--------|--------|----------|\n| 1 | [Action] | F1 | S | High | P0 — Do now |\n| 2 | [Action] | F3 | M | High | P1 — Next sprint |\n| 3 | [Action] | F2 | L | Medium | P2 — Backlog |\n\n## What We Still Don't Know\n- [Open questions for future research]\n- [Hypotheses to validate]\n\n## Appendix\n- Screener criteria\n- Interview guide\n- Raw data location\n- Participant demographics\n\nPhase 9: Design Validation\nDesign Critique Framework (CAMPS)\nDimension\tQuestions to Ask\nClarity\tCan users understand what this is and what to do within 5 seconds?\nAlignment\tDoes this solve the problem identified in research? For the right persona?\nMental Model\tDoes it match how users think about this task? (from interview data)\nPriority\tDoes the visual hierarchy match user task priority?\nSimplicity\tCan anything be removed without losing function?\nPrototype Review Checklist\ndesign_review:\n  screen: \"[Screen name]\"\n  reviewer: \"[Name]\"\n  date: \"YYYY-MM-DD\"\n  \n  research_alignment:\n    - check: \"Addresses top pain point from research\"\n      status: \"✅ / ❌ / ⚠️\"\n      notes: \"[Which finding this addresses]\"\n    - check: \"Uses language from user interviews (not internal jargon)\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Matches mental model revealed in research\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Works for primary persona AND doesn't break for secondary\"\n      status: \"✅ / ❌ / ⚠️\"\n  \n  usability:\n    - check: \"Primary action is visually dominant\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Error states designed and messaged\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Empty states designed (first use, no data, no results)\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Loading states designed\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Edge cases handled (long text, missing data, permissions)\"\n      status: \"✅ / ❌ / ⚠️\"\n  \n  accessibility:\n    - check: \"Color contrast meets WCAG AA (4.5:1 text, 3:1 UI)\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Touch targets ≥44px\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Information not conveyed by color alone\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Logical reading/tab order\"\n      status: \"✅ / ❌ / ⚠️\"\n    - check: \"Alt text for meaningful images\"\n      status: \"✅ / ❌ / ⚠️\"\n  \n  overall_score: \"[1-5]\"\n  ship_decision: \"Ready / Needs changes / Needs testing / Needs research\"\n\nPhase 10: Research Operations\nResearch Repository Structure\nresearch/\n├── YYYY/\n│   ├── Q1/\n│   │   ├── [study-name]/\n│   │   │   ├── plan.yaml          # Research brief\n│   │   │   ├── screener.yaml      # Recruiting criteria\n│   │   │   ├── guide.md           # Interview/test guide\n│   │   │   ├── notes/             # Per-participant notes\n│   │   │   │   ├── P01.yaml\n│   │   │   │   └── P02.yaml\n│   │   │   ├── synthesis/         # Themes, affinity maps\n│   │   │   ├── personas/          # Updated personas\n│   │   │   ├── journey-maps/      # Updated maps\n│   │   │   ├── report.md          # Final report\n│   │   │   └── recordings/        # Session recordings (link)\n│   │   └── [next-study]/\n│   └── Q2/\n├── personas/                      # Master persona library\n│   ├── persona-a.yaml\n│   └── persona-b.yaml\n├── journey-maps/                  # Master journey maps\n├── insights-database.yaml         # Cross-study insight tracker\n└── research-calendar.yaml         # Planned studies\n\nCross-Study Insight Tracker\ninsights_database:\n  - insight_id: \"INS-001\"\n    theme: \"[Category]\"\n    insight: \"[The insight]\"\n    first_found: \"2026-01-15\"\n    studies: [\"Study A\", \"Study C\", \"Study F\"]\n    evidence_strength: \"Strong\"  # 3+ studies\n    status: \"Addressed\"  # Open / In Progress / Addressed / Won't Fix\n    design_response: \"[What was done]\"\n    impact_measured: \"[Before/after metric if available]\"\n\nResearch Impact Tracking\nMetric\tHow to Measure\tTarget\nFindings → shipped features\t% of recommendations implemented within 2 quarters\t>60%\nPre/post usability scores\tSUS score before vs after changes\t+10 points\nSupport ticket reduction\tRelated ticket volume after design change\t-25%\nTask completion rate\tUsability test success rate over time\t>85%\nTime on task\tAverage task time trend\tDecreasing\nStakeholder confidence\tPost-study survey: \"How useful was this?\"\t>4/5\nQuick Commands\nCommand\tWhat It Does\n\"Plan a research study for [topic]\"\tGenerate research brief YAML\n\"Build a screener for [audience]\"\tGenerate screening questionnaire\n\"Create interview guide for [topic]\"\tGenerate interview questions and structure\n\"Build persona from [data/notes]\"\tSynthesize data into persona YAML\n\"Map the journey for [persona + goal]\"\tGenerate journey map\n\"Plan usability test for [prototype]\"\tGenerate test plan with tasks\n\"Run heuristic evaluation of [screen/flow]\"\tScore against Nielsen's 10\n\"Synthesize findings from [study]\"\tGenerate themes and insights\n\"Write research report for [study]\"\tGenerate executive summary and recommendations\n\"Score this research [report/study]\"\tEvaluate against quality rubric\n\"Review this design against research\"\tCAMPS critique + alignment check\n\"Set up research repository\"\tCreate folder structure and templates\nEdge Cases\nSmall Budget / No Recruiting Budget\nGuerrilla testing: coffee shop intercepts (5 min tests, buy them a coffee)\nInternal users: use colleagues from different departments (not product/design team)\nSocial media: post in relevant communities for volunteers\nExisting users: email opt-in for research panel\nRemote-Only Research\nVideo call with screen share (Zoom, Google Meet)\nAsync: Loom recordings of tasks + written responses\nUnmoderated: UserTesting.com, Maze, Lookback\nDiary studies: use messaging apps (WhatsApp, Telegram) for daily check-ins\nStakeholder Pushback (\"We don't have time for research\")\n\"5 users, 1 week, 3 critical findings\" — the minimum viable study\nPair research with existing touchpoints (support calls, sales demos)\nFrame as risk reduction: \"Would you rather discover this before or after launch?\"\nShow past research ROI (support ticket reduction, conversion improvement)\nConflicting Findings\nCheck sample composition — different segments may have different needs\nPrioritize by business impact: which segment is more valuable?\nRun a survey to quantify: \"60% prefer A, 40% prefer B\"\nConsider designing for both (progressive disclosure, personalization)\nInternational / Cross-Cultural Research\nDon't just translate — localize scenarios and contexts\nAccount for cultural response bias (e.g., reluctance to criticize in some cultures)\nUse local moderators when possible\nAdjust incentives to local norms\nWatch for design patterns that don't transfer (icons, colors, reading direction)\nAccessibility Research\nRecruit participants with disabilities (screen reader users, motor impairments, cognitive differences)\nTest with actual assistive technology, not simulation\nInclude in regular studies (at least 1 participant with accessibility needs per study)\nWCAG compliance testing is NOT a substitute for research with disabled users\n\nBuilt by AfrexAI — Autonomous Intelligence for Business"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/1kalin/afrexai-ux-research-engine",
    "publisherUrl": "https://clawhub.ai/1kalin/afrexai-ux-research-engine",
    "owner": "1kalin",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/afrexai-ux-research-engine",
    "downloadUrl": "https://openagent3.xyz/downloads/afrexai-ux-research-engine",
    "agentUrl": "https://openagent3.xyz/skills/afrexai-ux-research-engine/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-ux-research-engine/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-ux-research-engine/agent.md"
  }
}