{
  "schemaVersion": "1.0",
  "item": {
    "slug": "afrexai-interview-architect",
    "name": "Interview Architect",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/1kalin/afrexai-interview-architect",
    "canonicalUrl": "https://clawhub.ai/1kalin/afrexai-interview-architect",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/afrexai-interview-architect",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-interview-architect",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "README.md",
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-interview-architect",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=afrexai-interview-architect",
        "contentDisposition": "attachment; filename=\"afrexai-interview-architect-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/afrexai-interview-architect"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/afrexai-interview-architect",
    "agentPageUrl": "https://openagent3.xyz/skills/afrexai-interview-architect/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-interview-architect/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-interview-architect/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Then review README.md for any prerequisites, environment setup, or post-install checks. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Then review README.md for any prerequisites, environment setup, or post-install checks. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "Interview Architect",
        "body": "Complete hiring interview system — from job scorecard design through structured question banks, live evaluation rubrics, panel coordination, and offer decisions. Eliminates gut-feel hiring with evidence-based frameworks that predict on-the-job performance."
      },
      {
        "title": "Quick Start",
        "body": "Tell me what you need:\n\n\"Design interviews for [role]\" → Full interview plan (scorecard + questions + rubrics)\n\"Create a scorecard for [role]\" → A-Player definition with measurable outcomes\n\"Generate questions for [skill/competency]\" → Targeted question bank\n\"Build a take-home assignment for [role]\" → Technical assessment with rubric\n\"Evaluate this candidate\" → Structured debrief with scoring\n\"Audit our interview process\" → Bias check + effectiveness review"
      },
      {
        "title": "Phase 1: Job Scorecard (Define Before You Evaluate)",
        "body": "Rule: Never look at a resume before defining what \"great\" looks like."
      },
      {
        "title": "Scorecard Template",
        "body": "scorecard:\n  role: \"[Title]\"\n  level: \"[Junior/Mid/Senior/Staff/Principal/Director/VP]\"\n  team: \"[Team name]\"\n  hiring_manager: \"[Name]\"\n  created: \"YYYY-MM-DD\"\n\n  mission:\n    statement: \"[One sentence: why does this role exist?]\"\n    success_metric: \"[How we'll know this hire was successful in 12 months]\"\n\n  outcomes:\n    # 3-5 specific, measurable results expected in first 12 months\n    - outcome: \"[e.g., Reduce deployment time from 45min to <10min]\"\n      measure: \"[Metric: deployment duration, measured via CI/CD logs]\"\n      timeline: \"Q1-Q2\"\n      priority: \"critical\"\n\n    - outcome: \"[e.g., Ship v2 API with 99.9% uptime]\"\n      measure: \"[Uptime %, error rate, customer adoption]\"\n      timeline: \"Q2-Q3\"\n      priority: \"critical\"\n\n    - outcome: \"[e.g., Mentor 2 junior engineers to mid-level competency]\"\n      measure: \"[Promotion readiness assessment, PR quality metrics]\"\n      timeline: \"Q3-Q4\"\n      priority: \"important\"\n\n  competencies:\n    technical:\n      must_have:\n        - name: \"[e.g., System design]\"\n          level: \"[Novice/Competent/Proficient/Expert]\"\n          evidence: \"[What demonstrates this: e.g., designed systems handling 10K+ RPS]\"\n        - name: \"[e.g., TypeScript/React]\"\n          level: \"Proficient\"\n          evidence: \"[Shipped production TS/React apps, not just tutorials]\"\n      nice_to_have:\n        - name: \"[e.g., Kubernetes]\"\n          level: \"Competent\"\n\n    behavioral:\n      must_have:\n        - name: \"Ownership\"\n          definition: \"Takes responsibility for outcomes, not just tasks. Doesn't wait to be told.\"\n          anti_pattern: \"Says 'that's not my job' or 'I was told to do X'\"\n        - name: \"Communication\"\n          definition: \"Explains complex ideas simply. Writes clear docs. Raises issues early.\"\n          anti_pattern: \"Surprises stakeholders. Can't explain their own work.\"\n        - name: \"Growth mindset\"\n          definition: \"Seeks feedback. Admits mistakes. Improves from failure.\"\n          anti_pattern: \"Defensive about criticism. Repeats same mistakes.\"\n      nice_to_have:\n        - name: \"[e.g., Cross-functional leadership]\"\n\n    cultural:\n      values_alignment:\n        - \"[Company value 1: what this looks like in practice]\"\n        - \"[Company value 2: what this looks like in practice]\"\n      anti_signals:\n        - \"[Red flag behavior 1]\"\n        - \"[Red flag behavior 2]\"\n\n  compensation:\n    band: \"[min - max]\"\n    equity: \"[range if applicable]\"\n    flexibility: \"[What's negotiable]\"\n\n  deal_breakers:\n    # Hard no's — instant disqualification\n    - \"[e.g., Cannot start within 4 weeks]\"\n    - \"[e.g., No experience with production systems at scale]\"\n    - \"[e.g., Requires >30% above band]\""
      },
      {
        "title": "Scorecard Quality Check",
        "body": "Before proceeding, verify:\n\n□ Mission statement is one sentence (not a paragraph)\n□ Each outcome has a specific number or metric (not \"improve\" or \"help with\")\n□ Competencies distinguish must-have from nice-to-have\n□ Anti-patterns defined for each behavioral competency\n□ Deal breakers are objective (not subjective feelings)\n□ Band is realistic for the market (check levels.fyi, Glassdoor)"
      },
      {
        "title": "Interview Loop Template",
        "body": "interview_loop:\n  role: \"[from scorecard]\"\n  total_duration: \"[X hours across Y sessions]\"\n  \n  stages:\n    - stage: \"Resume Screen\"\n      duration: \"5-10 min\"\n      who: \"Recruiter or hiring manager\"\n      evaluates: [\"deal_breakers\", \"basic_qualification\"]\n      pass_rate_target: \"30-40%\"\n      \n    - stage: \"Phone Screen\"\n      duration: \"30 min\"\n      who: \"Hiring manager\"\n      evaluates: [\"communication\", \"motivation\", \"outcome_1_capability\"]\n      format: \"Structured conversation\"\n      pass_rate_target: \"50%\"\n      \n    - stage: \"Technical Assessment\"\n      duration: \"60-90 min\"\n      who: \"Senior engineer\"\n      evaluates: [\"technical_competencies\"]\n      format: \"Live coding OR take-home (see Phase 4)\"\n      pass_rate_target: \"40-50%\"\n      \n    - stage: \"System Design\"\n      duration: \"45-60 min\"\n      who: \"Staff+ engineer\"\n      evaluates: [\"system_design\", \"trade_off_thinking\", \"communication\"]\n      format: \"Whiteboard/collaborative design\"\n      pass_rate_target: \"50%\"\n      applies_to: \"Senior+ only\"\n      \n    - stage: \"Behavioral Deep-Dive\"\n      duration: \"45-60 min\"\n      who: \"Hiring manager + cross-functional partner\"\n      evaluates: [\"behavioral_competencies\", \"cultural_values\"]\n      format: \"STAR-based structured interview\"\n      pass_rate_target: \"60%\"\n      \n    - stage: \"Team Fit / Reverse Interview\"\n      duration: \"30 min\"\n      who: \"2-3 potential teammates\"\n      evaluates: [\"collaboration_style\", \"candidate_questions\"]\n      format: \"Informal but structured\"\n      pass_rate_target: \"80%\"\n      \n    - stage: \"Hiring Manager Final\"\n      duration: \"30 min\"\n      who: \"Hiring manager\"\n      evaluates: [\"remaining_concerns\", \"motivation\", \"offer_readiness\"]\n      format: \"Conversation\"\n\n  timeline:\n    screen_to_onsite: \"< 5 business days\"\n    onsite_to_decision: \"< 2 business days\"\n    decision_to_offer: \"< 1 business day\"\n    total_process: \"< 3 weeks\""
      },
      {
        "title": "Level-Appropriate Loop Adjustments",
        "body": "| Level | Skip | Add | Emphasis |\n| --- | --- | --- | --- |\n| Junior (0-2 yr) | System design | Pair programming, learning ability | Potential > experience |\n| Mid (2-5 yr) | — | — | Balanced: execution + growth |\n| Senior (5-8 yr) | — | Architecture discussion | Impact, ownership, mentoring |\n| Staff (8+ yr) | Basic coding | Design doc review, strategy | Influence, technical vision |\n| Principal | Basic coding | Vision presentation, exec interview | Org-wide impact |\n| Manager | Live coding | Skip-level, cross-functional | People outcomes, strategy |\n| Director+ | All IC technical | Board/exec presentation | Business impact, org building |"
      },
      {
        "title": "Behavioral Questions (STAR Format)",
        "body": "For each question below:\n\nAsk the main question\nThen probe with: \"Walk me through specifically what YOU did\" (not the team)\nThen probe with: \"What was the measurable result?\"\nWatch for: vague answers, \"we\" without \"I\", unable to recall specifics\n\nOwnership & Initiative\n\nQ: \"Tell me about a time you identified a problem no one asked you to fix, and you fixed it anyway.\"\n  Probe: \"How did you discover it? What did you do first? What was the outcome?\"\n  Green signal: Specific problem, proactive action, measurable impact\n  Red flag: Can't recall an example, or problem was trivial\n\nQ: \"Describe a project that failed or didn't meet expectations. What was your role?\"\n  Probe: \"What would you do differently? What did you learn?\"\n  Green signal: Owns their part, specific lessons, changed behavior afterward\n  Red flag: Blames others, no learning, defensive\n\nQ: \"Tell me about the last time you disagreed with your manager's technical decision.\"\n  Probe: \"How did you raise it? What happened? Would you do it differently?\"\n  Green signal: Respectful pushback with data, compromise or acceptance\n  Red flag: Never disagrees, or went around manager, or still bitter\n\nCommunication & Collaboration\n\nQ: \"Describe the most complex technical concept you had to explain to a non-technical audience.\"\n  Probe: \"How did you know they understood? What would you change?\"\n  Green signal: Adapts language, checks understanding, uses analogies\n  Red flag: Talks down, uses jargon anyway, frustrated by the need\n\nQ: \"Tell me about a cross-team project that had conflicting priorities.\"\n  Probe: \"How did you align the teams? What trade-offs were made?\"\n  Green signal: Proactive communication, documented agreements, escalated appropriately\n  Red flag: Waited for someone else to resolve, or steamrolled\n\nQ: \"Give me an example of written communication that had significant impact.\"\n  Probe: \"What was the context? Who was the audience? What resulted?\"\n  Green signal: Design doc, RFC, post-mortem that changed decisions\n  Red flag: Can't think of one, or only Slack messages\n\nTechnical Excellence\n\nQ: \"What's the best piece of code or system you've built? Walk me through it.\"\n  Probe: \"What trade-offs did you make? What would you change now?\"\n  Green signal: Deep understanding, clear trade-off reasoning, honest about flaws\n  Red flag: Can't go deep, no awareness of trade-offs\n\nQ: \"Tell me about a production incident you were involved in resolving.\"\n  Probe: \"How did you diagnose it? What was root cause? What prevented recurrence?\"\n  Green signal: Systematic debugging, root cause fix (not band-aid), prevention measures\n  Red flag: Only applied quick fix, blamed infrastructure, no follow-up\n\nQ: \"Describe a time you had to make a technical decision with incomplete information.\"\n  Probe: \"What did you know? What didn't you know? How did you decide?\"\n  Green signal: Explicit about unknowns, gathered what they could, made reversible decision\n  Red flag: Paralyzed, or overconfident without data\n\nLeadership & Mentoring (Senior+)\n\nQ: \"Tell me about someone you helped grow significantly in their career.\"\n  Probe: \"What did you specifically do? How did you know it was working?\"\n  Green signal: Specific actions (pair programming, stretch assignments, feedback), measurable growth\n  Red flag: \"I told them what to do\" or can't name anyone\n\nQ: \"Describe a technical strategy or vision you set for your team.\"\n  Probe: \"How did you get buy-in? How did you measure progress?\"\n  Green signal: Clear rationale, stakeholder alignment, adapted based on feedback\n  Red flag: Top-down mandate, or never set direction\n\nQ: \"Tell me about a time you had to say no to a stakeholder or product request.\"\n  Probe: \"How did you explain it? What was the outcome?\"\n  Green signal: Data-driven reasoning, offered alternatives, maintained relationship\n  Red flag: Just said no, or always says yes"
      },
      {
        "title": "Forensic Resume Questions (Pressure Tests)",
        "body": "For each resume highlight, design verification questions:\n\nPattern: \"[Impressive claim on resume]\"\n→ \"Walk me through [specific project]. What was the state when you joined?\"\n→ \"What was YOUR specific contribution vs the team's?\"\n→ \"What was the hardest technical problem YOU solved?\"\n→ \"If I called your manager from that time, what would they say was your biggest weakness?\"\n\nPattern: \"Led team of X\"\n→ \"How many people reported to you directly?\"\n→ \"Name someone you had to give tough feedback to. What happened?\"\n→ \"Who was the weakest performer? What did you do about it?\"\n\nPattern: \"Improved X by Y%\"\n→ \"What was the baseline measurement? How did you measure it?\"\n→ \"What was it before you started? After? How long did it take?\"\n→ \"What else changed during that period that could explain the improvement?\"\n\nPattern: \"Short tenure (< 1 year)\"\n→ \"Walk me through your decision to leave [company].\"\n→ \"What would your manager there say about your departure?\"\n→ \"What did you learn from that experience?\"\n\nPattern: \"Gap in employment\"\n→ Ask once, move on. Don't dwell. Valid reasons: health, family, travel, learning, job market.\n→ Red flag only if: story keeps changing, or they're evasive about a very long gap"
      },
      {
        "title": "Future Simulation Questions (Performance Prediction)",
        "body": "Design scenario questions based on the actual role's outcomes:\n\nTemplate:\n\"In this role, one of your first challenges will be [outcome from scorecard].\nThe current situation is [honest context]. \nWalk me through how you'd approach this in your first [timeframe].\"\n\nExample (Senior Backend):\n\"Our API currently handles 2K RPS but we need to scale to 50K by Q3.\nThe codebase is a 3-year-old Node.js monolith with PostgreSQL.\nBudget for infrastructure is $10K/mo. Team is 4 engineers including you.\nHow would you approach this?\"\n\nProbe sequence:\n1. \"What would you do in week 1?\" (Information gathering)\n2. \"What data would you need?\" (Analytical thinking)\n3. \"What are the biggest risks?\" (Risk awareness)\n4. \"If [constraint changes], how does your approach change?\" (Adaptability)\n5. \"How would you communicate progress to stakeholders?\" (Communication)\n\nScoring:\n5 — Structured approach, asks clarifying questions, identifies trade-offs, realistic timeline\n4 — Good approach with minor gaps\n3 — Reasonable but generic, doesn't probe assumptions\n2 — Jumps to solution without understanding problem\n1 — No coherent approach, or unrealistic"
      },
      {
        "title": "Live Coding Assessment Design",
        "body": "coding_assessment:\n  duration: \"60 min\"\n  structure:\n    warm_up: \"5 min — environment setup, introduce the problem\"\n    problem_1: \"20 min — core implementation\"\n    problem_2: \"25 min — extension or new problem\"\n    debrief: \"10 min — trade-offs discussion\"\n\n  problem_design_rules:\n    - Solvable in the time limit (test it yourself first — halve your time)\n    - Multiple valid approaches (no single \"right answer\")\n    - Extension points for stronger candidates\n    - Relevant to actual work (not algorithm puzzles unless role requires it)\n    - Candidate chooses their language\n    - Provide starter code / boilerplate to reduce setup time\n\n  evaluation_rubric:\n    problem_solving:\n      5: \"Breaks down problem, considers edge cases upfront, efficient approach\"\n      3: \"Gets to solution but misses edge cases or takes indirect path\"\n      1: \"Struggles to break down problem, no clear approach\"\n    \n    code_quality:\n      5: \"Clean, readable, well-named, handles errors, testable\"\n      3: \"Works but messy, some error handling, reasonable naming\"\n      1: \"Barely works, no error handling, unclear naming\"\n    \n    communication:\n      5: \"Thinks aloud, explains trade-offs, asks clarifying questions\"\n      3: \"Some explanation, responds to prompts\"\n      1: \"Silent, defensive about suggestions, doesn't explain reasoning\"\n    \n    testing_awareness:\n      5: \"Writes tests unprompted, considers edge cases, talks about test strategy\"\n      3: \"Writes tests when prompted, covers happy path\"\n      1: \"No testing consideration\"\n    \n    speed_and_fluency:\n      5: \"Fast, clearly experienced, language/tooling fluent\"\n      3: \"Reasonable pace, occasional lookups\"\n      1: \"Very slow, struggles with syntax/tooling\"\n\n  do_not:\n    - Ask trick questions or gotchas\n    - Time pressure beyond reasonable\n    - Penalize for looking things up\n    - Judge IDE/editor choice\n    - Ask questions that require proprietary knowledge"
      },
      {
        "title": "Take-Home Assessment Design",
        "body": "take_home:\n  time_limit: \"3-4 hours (honor system, state clearly)\"\n  deadline: \"5-7 days from send\"\n  \n  problem_design:\n    - Real-world scenario (not academic)\n    - Clear requirements with defined scope\n    - Extension section for candidates who want to show more\n    - Starter repo with CI, linting, test framework pre-configured\n    \n  deliverables:\n    required:\n      - Working solution\n      - Tests (at minimum: happy path + 2 edge cases)\n      - README explaining approach, trade-offs, what you'd improve\n    optional:\n      - Architecture diagram\n      - Performance analysis\n      - Additional features from extension section\n  \n  evaluation_rubric:\n    functionality: \"30% — Does it work? Edge cases handled?\"\n    code_quality: \"25% — Clean, readable, maintainable, well-structured\"\n    testing: \"20% — Coverage, meaningful tests, edge cases\"\n    documentation: \"15% — README quality, trade-off explanations\"\n    extras: \"10% — Extension features, thoughtful additions\"\n\n  anti_gaming:\n    - Check git history (single mega-commit = suspicious)\n    - Ask about implementation details in follow-up interview\n    - Vary the problem slightly across candidates\n    - Time the follow-up discussion: over-engineered solutions + can't explain = red flag"
      },
      {
        "title": "System Design Assessment (Senior+)",
        "body": "system_design:\n  duration: \"45-60 min\"\n  structure:\n    requirements: \"10 min — clarify scope, constraints, scale\"\n    high_level: \"15 min — components, data flow, API design\"\n    deep_dive: \"15 min — pick 1-2 areas to go deep\"\n    trade_offs: \"10 min — discuss alternatives, failure modes\"\n    extensions: \"5 min — how would this evolve?\"\n\n  evaluation:\n    requirements_gathering:\n      5: \"Asks about scale, users, latency requirements, budget before designing\"\n      3: \"Some clarifying questions but misses key constraints\"\n      1: \"Jumps straight to drawing boxes\"\n    \n    high_level_design:\n      5: \"Clear components with well-defined boundaries, data flows make sense\"\n      3: \"Reasonable architecture but some unclear responsibilities\"\n      1: \"Vague boxes with arrows, can't explain data flow\"\n    \n    depth:\n      5: \"Deep knowledge in chosen area, considers failure modes, cites real experience\"\n      3: \"Good knowledge but stays surface level\"\n      1: \"Can't go deep on any component\"\n    \n    trade_off_awareness:\n      5: \"Explicitly names trade-offs, compares alternatives, knows when each fits\"\n      3: \"Acknowledges trade-offs when prompted\"\n      1: \"Presents one approach as the only option\"\n    \n    scalability:\n      5: \"Considers growth path, bottleneck identification, realistic scaling strategy\"\n      3: \"Basic scaling awareness\"\n      1: \"No consideration of scale or unrealistic assumptions\""
      },
      {
        "title": "Per-Interviewer Scorecard",
        "body": "interviewer_scorecard:\n  candidate: \"[name]\"\n  interviewer: \"[name]\"\n  stage: \"[which interview]\"\n  date: \"YYYY-MM-DD\"\n  \n  # Score BEFORE reading other interviewers' feedback\n  overall: 1-5  # 1=Strong No, 2=Lean No, 3=Neutral, 4=Lean Yes, 5=Strong Yes\n  \n  competency_scores:\n    - competency: \"[from scorecard]\"\n      score: 1-5\n      evidence: \"[Specific quote or behavior observed]\"\n      \n    - competency: \"[from scorecard]\"\n      score: 1-5\n      evidence: \"[Specific quote or behavior observed]\"\n  \n  green_signals:\n    - \"[Specific positive indicator with evidence]\"\n    \n  red_flags:\n    - \"[Specific concern with evidence]\"\n    \n  questions_for_next_interviewer:\n    - \"[What to probe further]\"\n\n  # IMPORTANT: Submit before debrief. Do not change after discussion."
      },
      {
        "title": "Debrief Protocol",
        "body": "1. BEFORE debrief:\n   - All interviewers submit scorecards independently\n   - Hiring manager collects but does NOT share scores\n\n2. DEBRIEF structure (30-45 min):\n   a. Each interviewer states their overall vote FIRST (no explanation yet)\n      → This prevents anchoring bias from persuasive speakers\n   \n   b. Lowest scorer goes first (explain concerns)\n      → Prevents positive bias from drowning out concerns\n   \n   c. Highest scorer responds\n   \n   d. Open discussion — focus on EVIDENCE not feelings\n      → \"They seemed smart\" is not evidence\n      → \"They designed a cache invalidation strategy that handled...\" IS evidence\n   \n   e. Address conflicting signals:\n      → If strong yes + strong no on same competency, that's the discussion\n      → Resolve with: \"What specific behavior did you observe?\"\n   \n   f. Final vote (all interviewers):\n      → Strong Hire / Hire / No Hire / Strong No Hire\n      → Any \"Strong No Hire\" triggers discussion but NOT automatic rejection\n      → Hiring manager makes final call but must document reasoning\n\n3. AFTER debrief:\n   - Decision recorded with reasoning\n   - Feedback compiled for candidate (regardless of outcome)\n   - Action items assigned (offer prep or rejection with feedback)"
      },
      {
        "title": "Scoring Decision Matrix",
        "body": "Strong Hire (all 4-5):\n  → Make offer within 24 hours\n  → Expedite process — strong candidates have multiple offers\n\nHire (mix of 3-5, no 1s):\n  → Make offer within 48 hours\n  → Address any 3-scores with targeted onboarding plan\n\nBorderline (mix of 2-4):\n  → Additional data needed — one more focused interview on weak areas\n  → Set a deadline: if still borderline after additional data → No Hire\n  → \"When in doubt, don't hire\" — the cost of a bad hire > cost of continuing search\n\nNo Hire (any 1, or multiple 2s):\n  → Decline with specific, constructive feedback\n  → Document clearly for future reference (candidate may reapply)\n\nStrong No Hire (multiple 1s or deal breaker):\n  → Immediate decline\n  → Review: did we miss this in screening? Fix the funnel."
      },
      {
        "title": "Pre-Interview Bias Checks",
        "body": "Before each interview, remind yourself:\n□ I will evaluate against the SCORECARD, not my \"gut feeling\"\n□ I will give the same weight to disconfirming evidence as confirming\n□ I will not let one great/terrible answer color the entire evaluation\n□ I will not compare this candidate to the last one — compare to the scorecard\n□ I will note specific behaviors, not general impressions\n□ I will not evaluate \"culture fit\" as \"would I have a beer with them\""
      },
      {
        "title": "Common Biases in Hiring",
        "body": "| Bias | What It Looks Like | Mitigation |\n| --- | --- | --- |\n| Halo effect | Great at coding → assume great at everything | Score each competency independently |\n| Horn effect | Weak communication → assume weak technically | Same: score independently |\n| Similarity bias | \"Reminds me of me\" → favorable rating | Evaluate against scorecard, not self |\n| Anchoring | First impression sets the tone | Score after all questions, not during |\n| Confirmation bias | Early positive → only notice positives | Actively look for counter-evidence |\n| Contrast effect | Looks great after a weak candidate | Compare to scorecard, not other candidates |\n| Recency bias | Remember last answer, forget first | Take notes during interview |\n| Attribution error | Success = skill, failure = circumstances | Probe both: \"What went wrong? What helped?\" |\n| Leniency bias | Avoid conflict, rate everyone 3-4 | Force yourself to use the full 1-5 scale |\n| Urgency bias | \"We need someone NOW\" → lower bar | Never lower scorecard standards — extend timeline instead |"
      },
      {
        "title": "Structured Interview Rules",
        "body": "Same questions for same role — every candidate gets the same core questions\nScore immediately after — before discussing with anyone\nEvidence-based only — every score needs a specific observation\nDiverse panel — at least one interviewer from a different team/background\nBlind resume screen — remove name, school, company names for initial screen (if possible)\nNo leading questions — \"You're probably great at X, right?\" → \"Tell me about your experience with X\"\nTime-boxed — same duration for every candidate (don't cut short or extend based on vibes)"
      },
      {
        "title": "Communication Templates",
        "body": "After each stage — within 24 hours:\n\nADVANCING:\n\"Hi [name], thank you for your time today. We enjoyed our conversation about [specific topic]. \nWe'd like to move forward with [next stage]. [Interviewer name] will be speaking with you \nabout [topic]. Available times: [options]. \nAny questions before then? — [recruiter name]\"\n\nREJECTION (after phone screen):\n\"Hi [name], thank you for taking the time to speak with us about [role].\nAfter careful consideration, we've decided not to move forward at this stage.\n[One specific, constructive piece of feedback if appropriate].\nWe'll keep your information on file and may reach out for future opportunities that \nalign more closely. Wishing you the best in your search. — [name]\"\n\nREJECTION (after onsite):\n\"Hi [name], thank you for investing [X hours] in our interview process.\nWe were impressed by [specific positive], but ultimately decided to move forward \nwith a candidate whose [specific competency] more closely matches our current needs.\nFeedback: [1-2 specific, actionable items].\nWe genuinely appreciated your time and would welcome a future conversation \nif circumstances change. — [hiring manager name]\"\n\nOFFER (verbal, then written within 24h):\n\"Hi [name], I'm excited to share that we'd like to offer you the [role] position.\nWe were particularly impressed by [specific evidence from interviews].\nHere's what we're proposing: [comp summary]. I'll send the formal offer letter \nwithin 24 hours. Do you have any initial questions? — [hiring manager]\""
      },
      {
        "title": "Candidate Experience Scorecard",
        "body": "After every hire (and quarterly for all candidates):\n\n| Dimension | Target | How to Measure |\n| --- | --- | --- |\n| Time to schedule | < 48h between stages | Track in ATS |\n| Interviewer preparedness | 100% read scorecard before | Post-interview survey |\n| Communication timeliness | < 24h response | Track in ATS |\n| Feedback quality | Specific + actionable | Candidate survey |\n| Overall experience | 4+/5 | Candidate survey (all, not just hires) |\n| Offer acceptance rate | > 80% | Track in ATS |"
      },
      {
        "title": "Quarterly Hiring Review",
        "body": "quarterly_review:\n  period: \"Q[N] YYYY\"\n  \n  funnel_metrics:\n    applications: N\n    screens_passed: N  # → Screen pass rate\n    onsites: N         # → Onsite conversion rate  \n    offers: N          # → Offer rate\n    accepts: N         # → Acceptance rate\n    \n  quality_metrics:\n    ninety_day_retention: \"X%\"\n    manager_satisfaction_90d: \"X/5\"\n    time_to_productivity: \"X weeks\"\n    regretted_attrition_1yr: \"X%\"\n    \n  process_metrics:\n    time_to_fill: \"X days (target: <30)\"\n    time_in_stage:\n      screen: \"X days\"\n      onsite: \"X days\"  \n      decision: \"X days\"\n      offer: \"X days\"\n    interviewer_calibration: \"score variance across interviewers\"\n    \n  actions:\n    - \"[Improvement 1 based on metrics]\"\n    - \"[Improvement 2]\""
      },
      {
        "title": "Interview Question Effectiveness Tracking",
        "body": "For each question in your bank, track:\n\nquestion_effectiveness:\n  question: \"[question text]\"\n  times_asked: N\n  \n  signal_quality:\n    strong_differentiator: N  # Times this question clearly separated strong/weak\n    no_signal: N              # Times everyone answered similarly\n    confusing: N              # Times candidates misunderstood\n    \n  # If no_signal > 50% → Replace the question\n  # If confusing > 20% → Reword the question\n  # If strong_differentiator > 70% → Keep and promote"
      },
      {
        "title": "Interviewer Calibration",
        "body": "Monthly: Compare interviewer scores across candidates\n- Interviewer A averages 4.2, Interviewer B averages 2.8 → calibration needed\n- Run calibration session: review same candidate, discuss scoring differences\n- Goal: interviewers should be within 0.5 points on average for same candidates\n\nTraining for new interviewers:\n1. Shadow 3 interviews (observe, don't participate)\n2. Reverse shadow 2 interviews (conduct, observed by experienced interviewer)\n3. Solo with debrief for 3 interviews\n4. Full autonomy after calibration check"
      },
      {
        "title": "Internal Candidates",
        "body": "Use SAME scorecard as external (fairness)\nDifferent question strategy: focus on future role, not past (you already know their past)\nIf not selected: manager delivers feedback personally, development plan, timeline for re-candidacy\nNever promise the internal candidate gets special treatment"
      },
      {
        "title": "Executive Hiring",
        "body": "Add: reference checks (5+ structured, including back-channel)\nAdd: board/exec team dinner (culture, not evaluation)\nAdd: 90-day plan presentation as final stage\nExtended scorecard: strategic thinking, board management, talent magnetism\nUse executive search firm for sourcing, but own evaluation internally"
      },
      {
        "title": "High-Volume Hiring (10+ same role)",
        "body": "Standardize EVERYTHING: same questions, same rubric, same order\nUse structured scoring sheets, not free-form notes\nBatch calibration sessions weekly\nConsider: group assessment centers for initial stages\nTrack: quality variance across hiring managers (should be low)"
      },
      {
        "title": "Remote/Async Interviews",
        "body": "Test tech setup before the interview (not during)\nCamera on (both sides) — non-verbal cues matter\nRecord (with consent) for calibration purposes\nTake-home > live coding for timezone-challenged candidates\nBias alert: don't penalize for background noise, accent, or non-native English fluency"
      },
      {
        "title": "Boomerang Employees",
        "body": "Treat as new candidate (things change)\nSkip: basic company knowledge questions\nFocus: why they left, what changed, what they learned outside\nCheck: has the team/role changed since they left? Do current team members want them back?"
      },
      {
        "title": "Counteroffers",
        "body": "If candidate receives counteroffer:\n\nDon't panic-increase. Your offer should already be fair.\n\"We made our best offer based on the value of the role. We'd love to have you, but understand if you decide to stay.\"\nStatistics: 80% of people who accept counteroffers leave within 18 months anyway\nIf they stay: respect it, keep the door open"
      },
      {
        "title": "Natural Language Commands",
        "body": "Say\tI Do\n\"Design interviews for [role]\"\tFull loop: scorecard + structure + questions + rubrics\n\"Create a scorecard for [role]\"\tA-Player definition with outcomes and competencies\n\"Generate behavioral questions for [competency]\"\tSTAR questions with probes and scoring\n\"Build a take-home for [role]\"\tAssessment with rubric and anti-gaming measures\n\"Design a system design interview for [level]\"\tStructure + evaluation rubric\n\"Evaluate candidate [name]\"\tStructured debrief template with scoring\n\"Create a phone screen for [role]\"\t30-min structured screen with pass/fail criteria\n\"Write rejection feedback for [candidate]\"\tSpecific, constructive rejection message\n\"Audit our interview process\"\tFull process review with metrics and recommendations\n\"Calibrate interviewers\"\tCalibration session plan with scoring alignment\n\"Design interview for [role] at [company stage]\"\tAdjusted for startup/growth/enterprise context\n\"Generate reference check questions for [role]\"\tStructured reference interview guide"
      }
    ],
    "body": "Interview Architect\n\nComplete hiring interview system — from job scorecard design through structured question banks, live evaluation rubrics, panel coordination, and offer decisions. Eliminates gut-feel hiring with evidence-based frameworks that predict on-the-job performance.\n\nQuick Start\n\nTell me what you need:\n\n\"Design interviews for [role]\" → Full interview plan (scorecard + questions + rubrics)\n\"Create a scorecard for [role]\" → A-Player definition with measurable outcomes\n\"Generate questions for [skill/competency]\" → Targeted question bank\n\"Build a take-home assignment for [role]\" → Technical assessment with rubric\n\"Evaluate this candidate\" → Structured debrief with scoring\n\"Audit our interview process\" → Bias check + effectiveness review\nPhase 1: Job Scorecard (Define Before You Evaluate)\n\nRule: Never look at a resume before defining what \"great\" looks like.\n\nScorecard Template\nscorecard:\n  role: \"[Title]\"\n  level: \"[Junior/Mid/Senior/Staff/Principal/Director/VP]\"\n  team: \"[Team name]\"\n  hiring_manager: \"[Name]\"\n  created: \"YYYY-MM-DD\"\n\n  mission:\n    statement: \"[One sentence: why does this role exist?]\"\n    success_metric: \"[How we'll know this hire was successful in 12 months]\"\n\n  outcomes:\n    # 3-5 specific, measurable results expected in first 12 months\n    - outcome: \"[e.g., Reduce deployment time from 45min to <10min]\"\n      measure: \"[Metric: deployment duration, measured via CI/CD logs]\"\n      timeline: \"Q1-Q2\"\n      priority: \"critical\"\n\n    - outcome: \"[e.g., Ship v2 API with 99.9% uptime]\"\n      measure: \"[Uptime %, error rate, customer adoption]\"\n      timeline: \"Q2-Q3\"\n      priority: \"critical\"\n\n    - outcome: \"[e.g., Mentor 2 junior engineers to mid-level competency]\"\n      measure: \"[Promotion readiness assessment, PR quality metrics]\"\n      timeline: \"Q3-Q4\"\n      priority: \"important\"\n\n  competencies:\n    technical:\n      
must_have:\n        - name: \"[e.g., System design]\"\n          level: \"[Novice/Competent/Proficient/Expert]\"\n          evidence: \"[What demonstrates this: e.g., designed systems handling 10K+ RPS]\"\n        - name: \"[e.g., TypeScript/React]\"\n          level: \"Proficient\"\n          evidence: \"[Shipped production TS/React apps, not just tutorials]\"\n      nice_to_have:\n        - name: \"[e.g., Kubernetes]\"\n          level: \"Competent\"\n\n    behavioral:\n      must_have:\n        - name: \"Ownership\"\n          definition: \"Takes responsibility for outcomes, not just tasks. Doesn't wait to be told.\"\n          anti_pattern: \"Says 'that's not my job' or 'I was told to do X'\"\n        - name: \"Communication\"\n          definition: \"Explains complex ideas simply. Writes clear docs. Raises issues early.\"\n          anti_pattern: \"Surprises stakeholders. Can't explain their own work.\"\n        - name: \"Growth mindset\"\n          definition: \"Seeks feedback. Admits mistakes. Improves from failure.\"\n          anti_pattern: \"Defensive about criticism. 
Repeats same mistakes.\"\n      nice_to_have:\n        - name: \"[e.g., Cross-functional leadership]\"\n\n    cultural:\n      values_alignment:\n        - \"[Company value 1: what this looks like in practice]\"\n        - \"[Company value 2: what this looks like in practice]\"\n      anti_signals:\n        - \"[Red flag behavior 1]\"\n        - \"[Red flag behavior 2]\"\n\n  compensation:\n    band: \"[min - max]\"\n    equity: \"[range if applicable]\"\n    flexibility: \"[What's negotiable]\"\n\n  deal_breakers:\n    # Hard no's — instant disqualification\n    - \"[e.g., Cannot start within 4 weeks]\"\n    - \"[e.g., No experience with production systems at scale]\"\n    - \"[e.g., Requires >30% above band]\"\n\nScorecard Quality Check\n\nBefore proceeding, verify:\n\n Mission statement is one sentence (not a paragraph)\n Each outcome has a specific number or metric (not \"improve\" or \"help with\")\n Competencies distinguish must-have from nice-to-have\n Anti-patterns defined for each behavioral competency\n Deal breakers are objective (not subjective feelings)\n Band is realistic for the market (check levels.fyi, Glassdoor)\nPhase 2: Interview Structure Design\nInterview Loop Template\ninterview_loop:\n  role: \"[from scorecard]\"\n  total_duration: \"[X hours across Y sessions]\"\n  \n  stages:\n    - stage: \"Resume Screen\"\n      duration: \"5-10 min\"\n      who: \"Recruiter or hiring manager\"\n      evaluates: [\"deal_breakers\", \"basic_qualification\"]\n      pass_rate_target: \"30-40%\"\n      \n    - stage: \"Phone Screen\"\n      duration: \"30 min\"\n      who: \"Hiring manager\"\n      evaluates: [\"communication\", \"motivation\", \"outcome_1_capability\"]\n      format: \"Structured conversation\"\n      pass_rate_target: \"50%\"\n      \n    - stage: \"Technical Assessment\"\n      duration: \"60-90 min\"\n      who: \"Senior engineer\"\n      evaluates: [\"technical_competencies\"]\n      format: \"Live coding OR take-home (see Phase 4)\"\n  
    pass_rate_target: \"40-50%\"\n      \n    - stage: \"System Design\"\n      duration: \"45-60 min\"\n      who: \"Staff+ engineer\"\n      evaluates: [\"system_design\", \"trade_off_thinking\", \"communication\"]\n      format: \"Whiteboard/collaborative design\"\n      pass_rate_target: \"50%\"\n      applies_to: \"Senior+ only\"\n      \n    - stage: \"Behavioral Deep-Dive\"\n      duration: \"45-60 min\"\n      who: \"Hiring manager + cross-functional partner\"\n      evaluates: [\"behavioral_competencies\", \"cultural_values\"]\n      format: \"STAR-based structured interview\"\n      pass_rate_target: \"60%\"\n      \n    - stage: \"Team Fit / Reverse Interview\"\n      duration: \"30 min\"\n      who: \"2-3 potential teammates\"\n      evaluates: [\"collaboration_style\", \"candidate_questions\"]\n      format: \"Informal but structured\"\n      pass_rate_target: \"80%\"\n      \n    - stage: \"Hiring Manager Final\"\n      duration: \"30 min\"\n      who: \"Hiring manager\"\n      evaluates: [\"remaining_concerns\", \"motivation\", \"offer_readiness\"]\n      format: \"Conversation\"\n\n  timeline:\n    screen_to_onsite: \"< 5 business days\"\n    onsite_to_decision: \"< 2 business days\"\n    decision_to_offer: \"< 1 business day\"\n    total_process: \"< 3 weeks\"\n\nLevel-Appropriate Loop Adjustments\nLevel\tSkip\tAdd\tEmphasis\nJunior (0-2 yr)\tSystem design\tPair programming, learning ability\tPotential > experience\nMid (2-5 yr)\t—\t—\tBalanced: execution + growth\nSenior (5-8 yr)\t—\tArchitecture discussion\tImpact, ownership, mentoring\nStaff (8+ yr)\tBasic coding\tDesign doc review, strategy\tInfluence, technical vision\nPrincipal\tBasic coding\tVision presentation, exec interview\tOrg-wide impact\nManager\tLive coding\tSkip-level, cross-functional\tPeople outcomes, strategy\nDirector+\tAll IC technical\tBoard/exec presentation\tBusiness impact, org building\nPhase 3: Question Banks\nBehavioral Questions (STAR Format)\n\nFor each question 
below:\n\nAsk the main question\nThen probe with: \"Walk me through specifically what YOU did\" (not the team)\nThen probe with: \"What was the measurable result?\"\nWatch for: vague answers, \"we\" without \"I\", unable to recall specifics\nOwnership & Initiative\nQ: \"Tell me about a time you identified a problem no one asked you to fix, and you fixed it anyway.\"\n  Probe: \"How did you discover it? What did you do first? What was the outcome?\"\n  Green signal: Specific problem, proactive action, measurable impact\n  Red flag: Can't recall an example, or problem was trivial\n\nQ: \"Describe a project that failed or didn't meet expectations. What was your role?\"\n  Probe: \"What would you do differently? What did you learn?\"\n  Green signal: Owns their part, specific lessons, changed behavior afterward\n  Red flag: Blames others, no learning, defensive\n\nQ: \"Tell me about the last time you disagreed with your manager's technical decision.\"\n  Probe: \"How did you raise it? What happened? Would you do it differently?\"\n  Green signal: Respectful pushback with data, compromise or acceptance\n  Red flag: Never disagrees, or went around manager, or still bitter\n\nCommunication & Collaboration\nQ: \"Describe the most complex technical concept you had to explain to a non-technical audience.\"\n  Probe: \"How did you know they understood? What would you change?\"\n  Green signal: Adapts language, checks understanding, uses analogies\n  Red flag: Talks down, uses jargon anyway, frustrated by the need\n\nQ: \"Tell me about a cross-team project that had conflicting priorities.\"\n  Probe: \"How did you align the teams? What trade-offs were made?\"\n  Green signal: Proactive communication, documented agreements, escalated appropriately\n  Red flag: Waited for someone else to resolve, or steamrolled\n\nQ: \"Give me an example of written communication that had significant impact.\"\n  Probe: \"What was the context? Who was the audience? 
What resulted?\"\n  Green signal: Design doc, RFC, post-mortem that changed decisions\n  Red flag: Can't think of one, or only Slack messages\n\nTechnical Excellence\nQ: \"What's the best piece of code or system you've built? Walk me through it.\"\n  Probe: \"What trade-offs did you make? What would you change now?\"\n  Green signal: Deep understanding, clear trade-off reasoning, honest about flaws\n  Red flag: Can't go deep, no awareness of trade-offs\n\nQ: \"Tell me about a production incident you were involved in resolving.\"\n  Probe: \"How did you diagnose it? What was root cause? What prevented recurrence?\"\n  Green signal: Systematic debugging, root cause fix (not band-aid), prevention measures\n  Red flag: Only applied quick fix, blamed infrastructure, no follow-up\n\nQ: \"Describe a time you had to make a technical decision with incomplete information.\"\n  Probe: \"What did you know? What didn't you know? How did you decide?\"\n  Green signal: Explicit about unknowns, gathered what they could, made reversible decision\n  Red flag: Paralyzed, or overconfident without data\n\nLeadership & Mentoring (Senior+)\nQ: \"Tell me about someone you helped grow significantly in their career.\"\n  Probe: \"What did you specifically do? How did you know it was working?\"\n  Green signal: Specific actions (pair programming, stretch assignments, feedback), measurable growth\n  Red flag: \"I told them what to do\" or can't name anyone\n\nQ: \"Describe a technical strategy or vision you set for your team.\"\n  Probe: \"How did you get buy-in? How did you measure progress?\"\n  Green signal: Clear rationale, stakeholder alignment, adapted based on feedback\n  Red flag: Top-down mandate, or never set direction\n\nQ: \"Tell me about a time you had to say no to a stakeholder or product request.\"\n  Probe: \"How did you explain it? 
What was the outcome?\"\n  Green signal: Data-driven reasoning, offered alternatives, maintained relationship\n  Red flag: Just said no, or always says yes\n\nForensic Resume Questions (Pressure Tests)\n\nFor each resume highlight, design verification questions:\n\nPattern: \"[Impressive claim on resume]\"\n→ \"Walk me through [specific project]. What was the state when you joined?\"\n→ \"What was YOUR specific contribution vs the team's?\"\n→ \"What was the hardest technical problem YOU solved?\"\n→ \"If I called your manager from that time, what would they say was your biggest weakness?\"\n\nPattern: \"Led team of X\"\n→ \"How many people reported to you directly?\"\n→ \"Name someone you had to give tough feedback to. What happened?\"\n→ \"Who was the weakest performer? What did you do about it?\"\n\nPattern: \"Improved X by Y%\"\n→ \"What was the baseline measurement? How did you measure it?\"\n→ \"What was it before you started? After? How long did it take?\"\n→ \"What else changed during that period that could explain the improvement?\"\n\nPattern: \"Short tenure (< 1 year)\"\n→ \"Walk me through your decision to leave [company].\"\n→ \"What would your manager there say about your departure?\"\n→ \"What did you learn from that experience?\"\n\nPattern: \"Gap in employment\"\n→ Ask once, move on. Don't dwell. Valid reasons: health, family, travel, learning, job market.\n→ Red flag only if: story keeps changing, or they're evasive about a very long gap\n\nFuture Simulation Questions (Performance Prediction)\n\nDesign scenario questions based on the actual role's outcomes:\n\nTemplate:\n\"In this role, one of your first challenges will be [outcome from scorecard].\nThe current situation is [honest context]. 
\nWalk me through how you'd approach this in your first [timeframe].\"\n\nExample (Senior Backend):\n\"Our API currently handles 2K RPS but we need to scale to 50K by Q3.\nThe codebase is a 3-year-old Node.js monolith with PostgreSQL.\nBudget for infrastructure is $10K/mo. Team is 4 engineers including you.\nHow would you approach this?\"\n\nProbe sequence:\n1. \"What would you do in week 1?\" (Information gathering)\n2. \"What data would you need?\" (Analytical thinking)\n3. \"What are the biggest risks?\" (Risk awareness)\n4. \"If [constraint changes], how does your approach change?\" (Adaptability)\n5. \"How would you communicate progress to stakeholders?\" (Communication)\n\nScoring:\n5 — Structured approach, asks clarifying questions, identifies trade-offs, realistic timeline\n4 — Good approach with minor gaps\n3 — Reasonable but generic, doesn't probe assumptions\n2 — Jumps to solution without understanding problem\n1 — No coherent approach, or unrealistic\n\nPhase 4: Technical Assessments\nLive Coding Assessment Design\ncoding_assessment:\n  duration: \"60 min\"\n  structure:\n    warm_up: \"5 min — environment setup, introduce the problem\"\n    problem_1: \"20 min — core implementation\"\n    problem_2: \"25 min — extension or new problem\"\n    debrief: \"10 min — trade-offs discussion\"\n\n  problem_design_rules:\n    - Solvable in the time limit (test it yourself first — halve your time)\n    - Multiple valid approaches (no single \"right answer\")\n    - Extension points for stronger candidates\n    - Relevant to actual work (not algorithm puzzles unless role requires it)\n    - Candidate chooses their language\n    - Provide starter code / boilerplate to reduce setup time\n\n  evaluation_rubric:\n    problem_solving:\n      5: \"Breaks down problem, considers edge cases upfront, efficient approach\"\n      3: \"Gets to solution but misses edge cases or takes indirect path\"\n      1: \"Struggles to break down problem, no clear approach\"\n    \n    
code_quality:\n      5: \"Clean, readable, well-named, handles errors, testable\"\n      3: \"Works but messy, some error handling, reasonable naming\"\n      1: \"Barely works, no error handling, unclear naming\"\n    \n    communication:\n      5: \"Thinks aloud, explains trade-offs, asks clarifying questions\"\n      3: \"Some explanation, responds to prompts\"\n      1: \"Silent, defensive about suggestions, doesn't explain reasoning\"\n    \n    testing_awareness:\n      5: \"Writes tests unprompted, considers edge cases, talks about test strategy\"\n      3: \"Writes tests when prompted, covers happy path\"\n      1: \"No testing consideration\"\n    \n    speed_and_fluency:\n      5: \"Fast, clearly experienced, language/tooling fluent\"\n      3: \"Reasonable pace, occasional lookups\"\n      1: \"Very slow, struggles with syntax/tooling\"\n\n  do_not:\n    - Ask trick questions or gotchas\n    - Time pressure beyond reasonable\n    - Penalize for looking things up\n    - Judge IDE/editor choice\n    - Ask questions that require proprietary knowledge\n\nTake-Home Assessment Design\ntake_home:\n  time_limit: \"3-4 hours (honor system, state clearly)\"\n  deadline: \"5-7 days from send\"\n  \n  problem_design:\n    - Real-world scenario (not academic)\n    - Clear requirements with defined scope\n    - Extension section for candidates who want to show more\n    - Starter repo with CI, linting, test framework pre-configured\n    \n  deliverables:\n    required:\n      - Working solution\n      - Tests (at minimum: happy path + 2 edge cases)\n      - README explaining approach, trade-offs, what you'd improve\n    optional:\n      - Architecture diagram\n      - Performance analysis\n      - Additional features from extension section\n  \n  evaluation_rubric:\n    functionality: \"30% — Does it work? 
Edge cases handled?\"\n    code_quality: \"25% — Clean, readable, maintainable, well-structured\"\n    testing: \"20% — Coverage, meaningful tests, edge cases\"\n    documentation: \"15% — README quality, trade-off explanations\"\n    extras: \"10% — Extension features, thoughtful additions\"\n\n  anti_gaming:\n    - Check git history (single mega-commit = suspicious)\n    - Ask about implementation details in follow-up interview\n    - Vary the problem slightly across candidates\n    - Time the follow-up discussion: over-engineered solutions + can't explain = red flag\n\nSystem Design Assessment (Senior+)\nsystem_design:\n  duration: \"45-60 min\"\n  structure:\n    requirements: \"10 min — clarify scope, constraints, scale\"\n    high_level: \"15 min — components, data flow, API design\"\n    deep_dive: \"15 min — pick 1-2 areas to go deep\"\n    trade_offs: \"10 min — discuss alternatives, failure modes\"\n    extensions: \"5 min — how would this evolve?\"\n\n  evaluation:\n    requirements_gathering:\n      5: \"Asks about scale, users, latency requirements, budget before designing\"\n      3: \"Some clarifying questions but misses key constraints\"\n      1: \"Jumps straight to drawing boxes\"\n    \n    high_level_design:\n      5: \"Clear components with well-defined boundaries, data flows make sense\"\n      3: \"Reasonable architecture but some unclear responsibilities\"\n      1: \"Vague boxes with arrows, can't explain data flow\"\n    \n    depth:\n      5: \"Deep knowledge in chosen area, considers failure modes, cites real experience\"\n      3: \"Good knowledge but stays surface level\"\n      1: \"Can't go deep on any component\"\n    \n    trade_off_awareness:\n      5: \"Explicitly names trade-offs, compares alternatives, knows when each fits\"\n      3: \"Acknowledges trade-offs when prompted\"\n      1: \"Presents one approach as the only option\"\n    \n    scalability:\n      5: \"Considers growth path, bottleneck identification, realistic 
scaling strategy\"\n      3: \"Basic scaling awareness\"\n      1: \"No consideration of scale or unrealistic assumptions\"\n\nPhase 5: Evaluation & Decision\nPer-Interviewer Scorecard\ninterviewer_scorecard:\n  candidate: \"[name]\"\n  interviewer: \"[name]\"\n  stage: \"[which interview]\"\n  date: \"YYYY-MM-DD\"\n  \n  # Score BEFORE reading other interviewers' feedback\n  overall: 1-5  # 1=Strong No, 2=Lean No, 3=Neutral, 4=Lean Yes, 5=Strong Yes\n  \n  competency_scores:\n    - competency: \"[from scorecard]\"\n      score: 1-5\n      evidence: \"[Specific quote or behavior observed]\"\n      \n    - competency: \"[from scorecard]\"\n      score: 1-5\n      evidence: \"[Specific quote or behavior observed]\"\n  \n  green_signals:\n    - \"[Specific positive indicator with evidence]\"\n    \n  red_flags:\n    - \"[Specific concern with evidence]\"\n    \n  questions_for_next_interviewer:\n    - \"[What to probe further]\"\n\n  # IMPORTANT: Submit before debrief. Do not change after discussion.\n\nDebrief Protocol\n1. BEFORE debrief:\n   - All interviewers submit scorecards independently\n   - Hiring manager collects but does NOT share scores\n\n2. DEBRIEF structure (30-45 min):\n   a. Each interviewer states their overall vote FIRST (no explanation yet)\n      → This prevents anchoring bias from persuasive speakers\n   \n   b. Lowest scorer goes first (explain concerns)\n      → Prevents positive bias from drowning out concerns\n   \n   c. Highest scorer responds\n   \n   d. Open discussion — focus on EVIDENCE not feelings\n      → \"They seemed smart\" is not evidence\n      → \"They designed a cache invalidation strategy that handled...\" IS evidence\n   \n   e. Address conflicting signals:\n      → If strong yes + strong no on same competency, that's the discussion\n      → Resolve with: \"What specific behavior did you observe?\"\n   \n   f. 
Final vote (all interviewers):\n      → Strong Hire / Hire / No Hire / Strong No Hire\n      → Any \"Strong No Hire\" triggers discussion but NOT automatic rejection\n      → Hiring manager makes final call but must document reasoning\n\n3. AFTER debrief:\n   - Decision recorded with reasoning\n   - Feedback compiled for candidate (regardless of outcome)\n   - Action items assigned (offer prep or rejection with feedback)\n\nScoring Decision Matrix\nStrong Hire (all 4-5):\n  → Make offer within 24 hours\n  → Expedite process — strong candidates have multiple offers\n\nHire (mix of 3-5, no 1s):\n  → Make offer within 48 hours\n  → Address any 3-scores with targeted onboarding plan\n\nBorderline (mix of 2-4):\n  → Additional data needed — one more focused interview on weak areas\n  → Set a deadline: if still borderline after additional data → No Hire\n  → \"When in doubt, don't hire\" — the cost of a bad hire > cost of continuing search\n\nNo Hire (any 1, or multiple 2s):\n  → Decline with specific, constructive feedback\n  → Document clearly for future reference (candidate may reapply)\n\nStrong No Hire (multiple 1s or deal breaker):\n  → Immediate decline\n  → Review: did we miss this in screening? 
Fix the funnel.\n\nPhase 6: Bias Mitigation\nPre-Interview Bias Checks\nBefore each interview, remind yourself:\n□ I will evaluate against the SCORECARD, not my \"gut feeling\"\n□ I will give the same weight to disconfirming evidence as confirming\n□ I will not let one great/terrible answer color the entire evaluation\n□ I will not compare this candidate to the last one — compare to the scorecard\n□ I will note specific behaviors, not general impressions\n□ I will not evaluate \"culture fit\" as \"would I have a beer with them\"\n\nCommon Biases in Hiring\nBias\tWhat It Looks Like\tMitigation\nHalo effect\tGreat at coding → assume great at everything\tScore each competency independently\nHorn effect\tWeak communication → assume weak technically\tSame: score independently\nSimilarity bias\t\"Reminds me of me\" → favorable rating\tEvaluate against scorecard, not self\nAnchoring\tFirst impression sets the tone\tScore after all questions, not during\nConfirmation bias\tEarly positive → only notice positives\tActively look for counter-evidence\nContrast effect\tLooks great after a weak candidate\tCompare to scorecard, not other candidates\nRecency bias\tRemember last answer, forget first\tTake notes during interview\nAttribution error\tSuccess = skill, failure = circumstances\tProbe both: \"What went wrong? 
What helped?\"\nLeniency bias\tAvoid conflict, rate everyone 3-4\tForce yourself to use the full 1-5 scale\nUrgency bias\t\"We need someone NOW\" → lower bar\tNever lower scorecard standards — extend timeline instead\nStructured Interview Rules\nSame questions for same role — every candidate gets the same core questions\nScore immediately after — before discussing with anyone\nEvidence-based only — every score needs a specific observation\nDiverse panel — at least one interviewer from a different team/background\nBlind resume screen — remove name, school, company names for initial screen (if possible)\nNo leading questions — \"You're probably great at X, right?\" → \"Tell me about your experience with X\"\nTime-boxed — same duration for every candidate (don't cut short or extend based on vibes)\nPhase 7: Candidate Experience\nCommunication Templates\n\nAfter each stage — within 24 hours:\n\nADVANCING:\n\"Hi [name], thank you for your time today. We enjoyed our conversation about [specific topic]. \nWe'd like to move forward with [next stage]. [Interviewer name] will be speaking with you \nabout [topic]. Available times: [options]. \nAny questions before then? — [recruiter name]\"\n\nREJECTION (after phone screen):\n\"Hi [name], thank you for taking the time to speak with us about [role].\nAfter careful consideration, we've decided not to move forward at this stage.\n[One specific, constructive piece of feedback if appropriate].\nWe'll keep your information on file and may reach out for future opportunities that \nalign more closely. Wishing you the best in your search. 
— [name]\"\n\nREJECTION (after onsite):\n\"Hi [name], thank you for investing [X hours] in our interview process.\nWe were impressed by [specific positive], but ultimately decided to move forward \nwith a candidate whose [specific competency] more closely matches our current needs.\nFeedback: [1-2 specific, actionable items].\nWe genuinely appreciated your time and would welcome a future conversation \nif circumstances change. — [hiring manager name]\"\n\nOFFER (verbal, then written within 24h):\n\"Hi [name], I'm excited to share that we'd like to offer you the [role] position.\nWe were particularly impressed by [specific evidence from interviews].\nHere's what we're proposing: [comp summary]. I'll send the formal offer letter \nwithin 24 hours. Do you have any initial questions? — [hiring manager]\"\n\nCandidate Experience Scorecard\n\nAfter every hire (and quarterly for all candidates):\n\nDimension\tTarget\tHow to Measure\nTime to schedule\t< 48h between stages\tTrack in ATS\nInterviewer preparedness\t100% read scorecard before\tPost-interview survey\nCommunication timeliness\t< 24h response\tTrack in ATS\nFeedback quality\tSpecific + actionable\tCandidate survey\nOverall experience\t4+/5\tCandidate survey (all, not just hires)\nOffer acceptance rate\t> 80%\tTrack in ATS\nPhase 8: Process Audit & Improvement\nQuarterly Hiring Review\nquarterly_review:\n  period: \"Q[N] YYYY\"\n  \n  funnel_metrics:\n    applications: N\n    screens_passed: N  # → Screen pass rate\n    onsites: N         # → Onsite conversion rate  \n    offers: N          # → Offer rate\n    accepts: N         # → Acceptance rate\n    \n  quality_metrics:\n    ninety_day_retention: \"X%\"\n    manager_satisfaction_90d: \"X/5\"\n    time_to_productivity: \"X weeks\"\n    regretted_attrition_1yr: \"X%\"\n    \n  process_metrics:\n    time_to_fill: \"X days (target: <30)\"\n    time_in_stage:\n      screen: \"X days\"\n      onsite: \"X days\"  \n      decision: \"X days\"\n      offer: \"X 
days\"\n    interviewer_calibration: \"score variance across interviewers\"\n    \n  actions:\n    - \"[Improvement 1 based on metrics]\"\n    - \"[Improvement 2]\"\n\nInterview Question Effectiveness Tracking\n\nFor each question in your bank, track:\n\nquestion_effectiveness:\n  question: \"[question text]\"\n  times_asked: N\n  \n  signal_quality:\n    strong_differentiator: N  # Times this question clearly separated strong/weak\n    no_signal: N              # Times everyone answered similarly\n    confusing: N              # Times candidates misunderstood\n    \n  # If no_signal > 50% → Replace the question\n  # If confusing > 20% → Reword the question\n  # If strong_differentiator > 70% → Keep and promote\n\nInterviewer Calibration\nMonthly: Compare interviewer scores across candidates\n- Interviewer A averages 4.2, Interviewer B averages 2.8 → calibration needed\n- Run calibration session: review same candidate, discuss scoring differences\n- Goal: interviewers should be within 0.5 points on average for same candidates\n\nTraining for new interviewers:\n1. Shadow 3 interviews (observe, don't participate)\n2. Reverse shadow 2 interviews (conduct, observed by experienced interviewer)\n3. Solo with debrief for 3 interviews\n4. 
Full autonomy after calibration check\n\nEdge Cases\nInternal Candidates\nUse SAME scorecard as external (fairness)\nDifferent question strategy: focus on future role, not past (you already know their past)\nIf not selected: manager delivers feedback personally, development plan, timeline for re-candidacy\nNever promise the internal candidate gets special treatment\nExecutive Hiring\nAdd: reference checks (5+ structured, including back-channel)\nAdd: board/exec team dinner (culture, not evaluation)\nAdd: 90-day plan presentation as final stage\nExtended scorecard: strategic thinking, board management, talent magnetism\nUse executive search firm for sourcing, but own evaluation internally\nHigh-Volume Hiring (10+ same role)\nStandardize EVERYTHING: same questions, same rubric, same order\nUse structured scoring sheets, not free-form notes\nBatch calibration sessions weekly\nConsider: group assessment centers for initial stages\nTrack: quality variance across hiring managers (should be low)\nRemote/Async Interviews\nTest tech setup before the interview (not during)\nCamera on (both sides) — non-verbal cues matter\nRecord (with consent) for calibration purposes\nTake-home > live coding for timezone-challenged candidates\nBias alert: don't penalize for background noise, accent, or non-native English fluency\nBoomerang Employees\nTreat as new candidate (things change)\nSkip: basic company knowledge questions\nFocus: why they left, what changed, what they learned outside\nCheck: has the team/role changed since they left? Do current team members want them back?\nCounteroffers\nIf candidate receives counteroffer:\nDon't panic-increase. Your offer should already be fair.\n\"We made our best offer based on the value of the role. 
We'd love to have you, but understand if you decide to stay.\"\nStatistics: 80% of people who accept counteroffers leave within 18 months anyway\nIf they stay: respect it, keep the door open\nNatural Language Commands\nSay\tI Do\n\"Design interviews for [role]\"\tFull loop: scorecard + structure + questions + rubrics\n\"Create a scorecard for [role]\"\tA-Player definition with outcomes and competencies\n\"Generate behavioral questions for [competency]\"\tSTAR questions with probes and scoring\n\"Build a take-home for [role]\"\tAssessment with rubric and anti-gaming measures\n\"Design a system design interview for [level]\"\tStructure + evaluation rubric\n\"Evaluate candidate [name]\"\tStructured debrief template with scoring\n\"Create a phone screen for [role]\"\t30-min structured screen with pass/fail criteria\n\"Write rejection feedback for [candidate]\"\tSpecific, constructive rejection message\n\"Audit our interview process\"\tFull process review with metrics and recommendations\n\"Calibrate interviewers\"\tCalibration session plan with scoring alignment\n\"Design interview for [role] at [company stage]\"\tAdjusted for startup/growth/enterprise context\n\"Generate reference check questions for [role]\"\tStructured reference interview guide"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/1kalin/afrexai-interview-architect",
    "publisherUrl": "https://clawhub.ai/1kalin/afrexai-interview-architect",
    "owner": "1kalin",
    "version": "1.0.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/afrexai-interview-architect",
    "downloadUrl": "https://openagent3.xyz/downloads/afrexai-interview-architect",
    "agentUrl": "https://openagent3.xyz/skills/afrexai-interview-architect/agent",
    "manifestUrl": "https://openagent3.xyz/skills/afrexai-interview-architect/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/afrexai-interview-architect/agent.md"
  }
}