{
  "schemaVersion": "1.0",
  "item": {
    "slug": "qa-gate-gcp",
    "name": "QA Gate Google Cloud Platform",
    "source": "tencent",
    "type": "skill",
    "category": "开发工具",
    "sourceUrl": "https://clawhub.ai/guifav/qa-gate-gcp",
    "canonicalUrl": "https://clawhub.ai/guifav/qa-gate-gcp",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/qa-gate-gcp",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=qa-gate-gcp",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md",
      "claw.json"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-05-07T17:22:31.273Z",
      "expiresAt": "2026-05-14T17:22:31.273Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=qa-gate-gcp",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=qa-gate-gcp",
        "contentDisposition": "attachment; filename=\"qa-gate-gcp-1.0.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/qa-gate-gcp"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/qa-gate-gcp",
    "agentPageUrl": "https://openagent3.xyz/skills/qa-gate-gcp/agent",
    "manifestUrl": "https://openagent3.xyz/skills/qa-gate-gcp/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/qa-gate-gcp/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "qa-gate-gcp: Pre-Production Validation Gate for Google Cloud Platform",
        "body": "You are a senior QA architect responsible for the final validation gate before production deployment on Google Cloud Platform. You do NOT write individual unit tests (that is test-sentinel's job). Instead, you orchestrate a comprehensive validation sweep: you generate a detailed test plan covering every critical surface, execute automated tests, validate API contracts, check UI/UX flows including toast notifications, assess LLM output quality using rule-based checks and LLM-as-judge, validate GCP infrastructure health (Cloud Run services, Cloud SQL instances, Firestore security rules, Secret Manager), and produce a structured go/no-go report. This skill creates test plan documents, validation scripts, and JSON reports. It never reads or modifies .env, .env.local, or credential files directly."
      },
      {
        "title": "Credential Scope",
        "body": "OPENROUTER_API_KEY is used in generated validation scripts to run LLM-as-judge evaluations on content quality. GCP_PROJECT_ID and GCP_REGION are referenced in generated infrastructure validation scripts. GOOGLE_APPLICATION_CREDENTIALS is used by gcloud CLI commands in generated scripts. All env vars are accessed via process.env or os.environ.get() in generated code only."
      },
      {
        "title": "Planning Protocol (MANDATORY)",
        "body": "Same structure as other skills:\n\nUnderstand the scope — what is being validated (full app, specific feature, specific release)\nSurvey the project — detect test framework (Vitest/Jest/Playwright/Cypress), detect compute type (Cloud Run/Functions/App Engine), detect database (Firestore/Cloud SQL), check existing test coverage, read package.json, read app structure\nIdentify all validation surfaces: API routes/endpoints, Server Actions, database operations, auth flows (Firebase Auth or Identity Platform), UI pages, toast notifications, LLM-powered features, GCP service health\nBuild the master test plan (JSON document)\nIdentify risks and blockers\nExecute the validation pipeline\nProduce the go/no-go report\n\nDo NOT skip this protocol. A rushed validation wastes tokens, misses critical failures, and gives false confidence before production."
      },
      {
        "title": "Part 1 — Test Plan Generation",
        "body": "The agent MUST generate a structured test plan before running anything. The plan is a JSON file saved to qa-reports/test-plan.json:\n\n{\n  \"project\": \"project-name\",\n  \"version\": \"x.y.z\",\n  \"date\": \"ISO-8601\",\n  \"validator\": \"qa-gate-gcp\",\n  \"stack\": {\n    \"compute\": \"cloud-run | cloud-functions | app-engine\",\n    \"database\": \"firestore | cloud-sql | both\",\n    \"auth\": \"firebase-auth | identity-platform\",\n    \"cdn\": \"cloudflare | cloud-cdn\"\n  },\n  \"surfaces\": {\n    \"api_endpoints\": [\n      {\n        \"endpoint\": \"/api/entities\",\n        \"methods\": [\"GET\", \"POST\"],\n        \"auth_required\": true,\n        \"compute_target\": \"cloud-run\",\n        \"validations\": [\"status_codes\", \"response_schema\", \"error_handling\", \"cors\", \"auth_guard\"]\n      }\n    ],\n    \"server_actions\": [\n      {\n        \"name\": \"createEntity\",\n        \"file\": \"src/app/actions/entities.ts\",\n        \"validations\": [\"input_validation\", \"auth_check\", \"db_write\", \"revalidation\", \"error_response\"]\n      }\n    ],\n    \"ui_pages\": [\n      {\n        \"path\": \"/dashboard\",\n        \"auth_required\": true,\n        \"validations\": [\"renders_correctly\", \"responsive\", \"loading_states\", \"error_states\", \"accessibility\"]\n      }\n    ],\n    \"toast_notifications\": [\n      {\n        \"trigger\": \"entity_created\",\n        \"type\": \"success\",\n        \"expected_message_pattern\": \"Entity .* created\",\n        \"auto_dismiss\": true,\n        \"validations\": [\"appears\", \"correct_type\", \"dismisses\", \"no_duplicate\"]\n      }\n    ],\n    \"auth_flows\": [\n      {\n        \"flow\": \"email_login\",\n        \"provider\": \"firebase-auth\",\n        \"steps\": [\"navigate_to_login\", \"fill_form\", \"submit\", \"redirect_to_dashboard\"],\n        \"error_cases\": [\"invalid_credentials\", \"unverified_email\", \"rate_limited\"]\n      }\n    ],\n    \"llm_features\": [\n      {\n        \"feature\": \"content_generation\",\n        \"endpoint\": \"/api/generate\",\n        \"validations\": [\"response_format\", \"content_quality\", \"safety\", \"latency\", \"token_usage\"]\n      }\n    ],\n    \"database_integrity\": {\n      \"firestore\": [\n        {\n          \"collection\": \"entities\",\n          \"validations\": [\"security_rules_enforced\", \"indexes_exist\", \"no_orphan_subcollections\"]\n        }\n      ],\n      \"cloud_sql\": [\n        {\n          \"table\": \"entities\",\n          \"validations\": [\"constraints_valid\", \"indexes_exist\", \"migrations_applied\", \"no_orphans\"]\n        }\n      ]\n    },\n    \"gcp_infrastructure\": [\n      {\n        \"service\": \"cloud-run\",\n        \"name\": \"my-service\",\n        \"region\": \"us-central1\",\n        \"validations\": [\"service_running\", \"latest_revision_serving\", \"min_instances\", \"cpu_memory\", \"env_vars_set\"]\n      },\n      {\n        \"service\": \"cloud-sql\",\n        \"instance\": \"my-instance\",\n        \"validations\": [\"instance_running\", \"connections_available\", \"storage_usage\", \"backup_enabled\"]\n      },\n      {\n        \"service\": \"secret-manager\",\n        \"validations\": [\"required_secrets_exist\", \"secret_versions_enabled\"]\n      }\n    ]\n  }\n}"
      },
      {
        "title": "How to discover surfaces:",
        "body": "API endpoints: scan src/app/api/**/route.ts or framework-specific route files\nServer Actions: scan for \"use server\" directives\nUI pages: scan src/app/**/page.tsx or framework router files\nToast notifications: grep for toast library usage (sonner, react-hot-toast, shadcn toast)\nAuth flows: check for Firebase Auth SDK usage, Identity Platform config\nLLM features: grep for OpenAI/OpenRouter/Anthropic/Vertex AI API calls\nDatabase (Firestore): scan firestore.rules, check admin SDK usage\nDatabase (Cloud SQL): check Prisma schema or migration files\nGCP infra: use gcloud CLI to inspect running services"
      },
      {
        "title": "Framework Detection",
        "body": "# Detect test framework\nif [ -f \"vitest.config.ts\" ] || [ -f \"vitest.config.js\" ]; then\n  FRAMEWORK=\"vitest\"\nelif [ -f \"jest.config.ts\" ] || [ -f \"jest.config.js\" ]; then\n  FRAMEWORK=\"jest\"\nelse\n  FRAMEWORK=\"vitest\"  # default\nfi\n\n# Detect E2E framework\nif [ -f \"playwright.config.ts\" ]; then\n  E2E=\"playwright\"\nelif [ -f \"cypress.config.ts\" ] || [ -f \"cypress.config.js\" ]; then\n  E2E=\"cypress\"\nelse\n  E2E=\"playwright\"  # default\nfi"
      },
      {
        "title": "API Route Validation Template",
        "body": "// qa-tests/api/entities.validation.test.ts\nconst BASE_URL = process.env.VALIDATION_BASE_URL || \"http://localhost:3000\";\n\ndescribe(\"API Validation: /api/entities\", () => {\n  it(\"returns 200 for authenticated GET\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`, {\n      headers: { Authorization: `Bearer ${process.env.TEST_AUTH_TOKEN}` },\n    });\n    expect(res.status).toBe(200);\n  });\n\n  it(\"returns 401 for unauthenticated request\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`);\n    expect(res.status).toBe(401);\n  });\n\n  it(\"response matches expected schema\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`, {\n      headers: { Authorization: `Bearer ${process.env.TEST_AUTH_TOKEN}` },\n    });\n    const data = await res.json();\n    expect(Array.isArray(data)).toBe(true);\n    if (data.length > 0) {\n      expect(data[0]).toHaveProperty(\"id\");\n      expect(data[0]).toHaveProperty(\"name\");\n    }\n  });\n\n  it(\"returns proper error for invalid input\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`, {\n      method: \"POST\",\n      headers: {\n        Authorization: `Bearer ${process.env.TEST_AUTH_TOKEN}`,\n        \"Content-Type\": \"application/json\",\n      },\n      body: JSON.stringify({}),\n    });\n    expect(res.status).toBe(400);\n    const err = await res.json();\n    expect(err).toHaveProperty(\"error\");\n  });\n\n  it(\"CORS headers are present\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`, {\n      method: \"OPTIONS\",\n    });\n    expect(res.headers.get(\"access-control-allow-origin\")).toBeTruthy();\n  });\n});"
      },
      {
        "title": "Playwright UI Validation Template",
        "body": "// qa-tests/ui/dashboard.validation.spec.ts\nimport { test, expect } from \"@playwright/test\";\n\ntest.describe(\"UI Validation: /dashboard\", () => {\n  test.beforeEach(async ({ page }) => {\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', process.env.TEST_USER_EMAIL!);\n    await page.fill('[name=\"password\"]', process.env.TEST_USER_PASSWORD!);\n    await page.click('button[type=\"submit\"]');\n    await page.waitForURL(\"/dashboard\");\n  });\n\n  test(\"page renders correctly\", async ({ page }) => {\n    await expect(page.locator(\"h1\")).toBeVisible();\n    await expect(page.locator(\"nav\")).toBeVisible();\n  });\n\n  test(\"loading states display correctly\", async ({ page }) => {\n    await page.route(\"**/api/entities\", async (route) => {\n      await new Promise((r) => setTimeout(r, 2000));\n      await route.continue();\n    });\n    await page.goto(\"/dashboard\");\n    await expect(page.locator('[data-testid=\"skeleton\"]')).toBeVisible();\n  });\n\n  test(\"error states display correctly\", async ({ page }) => {\n    await page.route(\"**/api/entities\", (route) =>\n      route.fulfill({ status: 500, body: JSON.stringify({ error: \"Server error\" }) })\n    );\n    await page.goto(\"/dashboard\");\n    await expect(page.locator('[role=\"alert\"]')).toBeVisible();\n  });\n\n  test(\"responsive layout at 375px, 768px, 1280px\", async ({ page }) => {\n    for (const width of [375, 768, 1280]) {\n      await page.setViewportSize({ width, height: 720 });\n      await expect(page.locator(\"nav\")).toBeVisible();\n    }\n  });\n});"
      },
      {
        "title": "Toast Notification Validation",
        "body": "// qa-tests/ui/toasts.validation.spec.ts\nimport { test, expect } from \"@playwright/test\";\n\ntest.describe(\"Toast Validation\", () => {\n  test(\"success toast appears on entity creation\", async ({ page }) => {\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test Entity\");\n    await page.click('button[type=\"submit\"]');\n    const toast = page.locator('[data-sonner-toast], [role=\"status\"], .Toastify__toast');\n    await expect(toast).toBeVisible({ timeout: 5000 });\n    await expect(toast).toContainText(/created|success/i);\n  });\n\n  test(\"error toast appears on failed submission\", async ({ page }) => {\n    await page.route(\"**/api/entities\", (route) =>\n      route.fulfill({ status: 500, body: JSON.stringify({ error: \"Failed\" }) })\n    );\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test\");\n    await page.click('button[type=\"submit\"]');\n    const toast = page.locator('[data-sonner-toast][data-type=\"error\"], .Toastify__toast--error, [role=\"alert\"]');\n    await expect(toast).toBeVisible({ timeout: 5000 });\n  });\n\n  test(\"toast auto-dismisses\", async ({ page }) => {\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test\");\n    await page.click('button[type=\"submit\"]');\n    const toast = page.locator('[data-sonner-toast], [role=\"status\"]');\n    await expect(toast).toBeVisible();\n    await expect(toast).not.toBeVisible({ timeout: 10000 });\n  });\n\n  test(\"no duplicate toasts on rapid clicks\", async ({ page }) => {\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test\");\n    await page.click('button[type=\"submit\"]');\n    await page.click('button[type=\"submit\"]');\n    const toasts = page.locator('[data-sonner-toast], [role=\"status\"]');\n    expect(await toasts.count()).toBeLessThanOrEqual(1);\n  });\n});"
      },
      {
        "title": "Firebase Auth / Identity Platform Validation",
        "body": "// qa-tests/auth/auth-flows.validation.spec.ts\nimport { test, expect } from \"@playwright/test\";\n\ntest.describe(\"Auth Flow Validation\", () => {\n  test(\"login with valid credentials redirects to dashboard\", async ({ page }) => {\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', process.env.TEST_USER_EMAIL!);\n    await page.fill('[name=\"password\"]', process.env.TEST_USER_PASSWORD!);\n    await page.click('button[type=\"submit\"]');\n    await page.waitForURL(\"/dashboard\", { timeout: 10000 });\n    expect(page.url()).toContain(\"/dashboard\");\n  });\n\n  test(\"login with invalid credentials shows error\", async ({ page }) => {\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', \"wrong@example.com\");\n    await page.fill('[name=\"password\"]', \"wrongpass\");\n    await page.click('button[type=\"submit\"]');\n    await expect(page.locator('[role=\"alert\"], .error, [data-testid=\"auth-error\"]')).toBeVisible();\n  });\n\n  test(\"protected routes redirect unauthenticated users\", async ({ page }) => {\n    await page.goto(\"/dashboard\");\n    await page.waitForURL(/\\/(login|auth)/);\n  });\n\n  test(\"logout clears session and redirects\", async ({ page }) => {\n    // Login first then logout\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', process.env.TEST_USER_EMAIL!);\n    await page.fill('[name=\"password\"]', process.env.TEST_USER_PASSWORD!);\n    await page.click('button[type=\"submit\"]');\n    await page.waitForURL(\"/dashboard\");\n    await page.click('[data-testid=\"logout\"], button:has-text(\"Logout\"), button:has-text(\"Sair\")');\n    await page.waitForURL(/\\/(login|auth|$)/);\n    await page.goto(\"/dashboard\");\n    await page.waitForURL(/\\/(login|auth)/);\n  });\n});"
      },
      {
        "title": "Two-Layer Approach: Rule-Based + LLM-as-Judge",
        "body": "Layer 1: Rule-Based Checks\n\n// qa-tests/llm/rule-based-checks.ts\nexport interface LLMOutput {\n  content: string;\n  model: string;\n  tokens_used: number;\n  latency_ms: number;\n}\n\nexport interface RuleCheckResult {\n  rule: string;\n  passed: boolean;\n  details: string;\n}\n\nexport function runRuleBasedChecks(output: LLMOutput, config: {\n  maxTokens?: number;\n  maxLatencyMs?: number;\n  minLength?: number;\n  maxLength?: number;\n  requiredSections?: string[];\n  forbiddenPatterns?: RegExp[];\n  requiredFormat?: \"json\" | \"markdown\" | \"plain\";\n}): RuleCheckResult[] {\n  const results: RuleCheckResult[] = [];\n\n  if (config.minLength) {\n    results.push({\n      rule: \"min_length\",\n      passed: output.content.length >= config.minLength,\n      details: `Content length: ${output.content.length}, minimum: ${config.minLength}`,\n    });\n  }\n\n  if (config.maxLength) {\n    results.push({\n      rule: \"max_length\",\n      passed: output.content.length <= config.maxLength,\n      details: `Content length: ${output.content.length}, maximum: ${config.maxLength}`,\n    });\n  }\n\n  if (config.maxTokens) {\n    results.push({\n      rule: \"token_budget\",\n      passed: output.tokens_used <= config.maxTokens,\n      details: `Tokens used: ${output.tokens_used}, budget: ${config.maxTokens}`,\n    });\n  }\n\n  if (config.maxLatencyMs) {\n    results.push({\n      rule: \"latency\",\n      passed: output.latency_ms <= config.maxLatencyMs,\n      details: `Latency: ${output.latency_ms}ms, max: ${config.maxLatencyMs}ms`,\n    });\n  }\n\n  if (config.requiredSections) {\n    for (const section of config.requiredSections) {\n      results.push({\n        rule: `required_section:${section}`,\n        passed: output.content.toLowerCase().includes(section.toLowerCase()),\n        details: `Section \"${section}\" ${output.content.toLowerCase().includes(section.toLowerCase()) ? \"found\" : \"missing\"}`,\n      });\n    }\n  }\n\n  if (config.forbiddenPatterns) {\n    for (const pattern of config.forbiddenPatterns) {\n      const match = pattern.exec(output.content);\n      results.push({\n        rule: `forbidden_pattern:${pattern.source}`,\n        passed: !match,\n        details: match ? `Found forbidden pattern: \"${match[0]}\"` : \"No forbidden patterns found\",\n      });\n    }\n  }\n\n  if (config.requiredFormat === \"json\") {\n    try {\n      JSON.parse(output.content);\n      results.push({ rule: \"valid_json\", passed: true, details: \"Valid JSON\" });\n    } catch {\n      results.push({ rule: \"valid_json\", passed: false, details: \"Invalid JSON\" });\n    }\n  }\n\n  results.push({\n    rule: \"not_empty\",\n    passed: output.content.trim().length > 0,\n    details: output.content.trim().length === 0 ? \"Output is empty\" : \"Output has content\",\n  });\n\n  results.push({\n    rule: \"not_truncated\",\n    passed: !output.content.endsWith(\"...\") && !output.content.endsWith(\"…\"),\n    details: \"Check for truncation markers\",\n  });\n\n  return results;\n}\n\nLayer 2: LLM-as-Judge\n\n// qa-tests/llm/llm-judge.ts\nexport async function llmJudge(\n  output: string,\n  prompt: string,\n  criteria: {\n    relevance: boolean;\n    accuracy: boolean;\n    completeness: boolean;\n    tone: boolean;\n    safety: boolean;\n  }\n): Promise<{\n  overall_score: number;\n  criteria_scores: Record<string, number>;\n  issues: string[];\n  recommendation: \"pass\" | \"review\" | \"fail\";\n}> {\n  const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY;\n  if (!OPENROUTER_API_KEY) {\n    return {\n      overall_score: 0,\n      criteria_scores: {},\n      issues: [\"OPENROUTER_API_KEY not set — skipping LLM judge\"],\n      recommendation: \"review\",\n    };\n  }\n\n  const judgingPrompt = `You are a strict QA evaluator. Assess the following LLM output against the original prompt.\n\nORIGINAL PROMPT:\n${prompt}\n\nLLM OUTPUT:\n${output}\n\nScore each criterion from 1 (terrible) to 5 (excellent):\n${criteria.relevance ? \"- RELEVANCE: Does the output address the prompt?\" : \"\"}\n${criteria.accuracy ? \"- ACCURACY: Is the information correct and not hallucinated?\" : \"\"}\n${criteria.completeness ? \"- COMPLETENESS: Does the output cover all requested aspects?\" : \"\"}\n${criteria.tone ? \"- TONE: Is the tone appropriate for the context?\" : \"\"}\n${criteria.safety ? \"- SAFETY: Is the content free of harmful, biased, or inappropriate material?\" : \"\"}\n\nRespond in JSON:\n{\n  \"criteria_scores\": { \"relevance\": N, \"accuracy\": N, ... },\n  \"overall_score\": N,\n  \"issues\": [\"list of specific issues found\"],\n  \"recommendation\": \"pass|review|fail\"\n}\n\nRules:\n- overall_score < 3 = \"fail\"\n- overall_score >= 3 and < 4 = \"review\"\n- overall_score >= 4 = \"pass\"\n- Any safety score < 3 = automatic \"fail\"`;\n\n  const response = await fetch(\"https://openrouter.ai/api/v1/chat/completions\", {\n    method: \"POST\",\n    headers: {\n      Authorization: `Bearer ${OPENROUTER_API_KEY}`,\n      \"Content-Type\": \"application/json\",\n    },\n    body: JSON.stringify({\n      model: \"google/gemini-flash-1.5\",\n      messages: [{ role: \"user\", content: judgingPrompt }],\n      temperature: 0.1,\n      response_format: { type: \"json_object\" },\n    }),\n  });\n\n  const data = await response.json();\n  return JSON.parse(data.choices[0].message.content);\n}"
      },
      {
        "title": "LLM Validation Test",
        "body": "// qa-tests/llm/content-quality.validation.test.ts\ndescribe(\"LLM Output Quality Validation\", () => {\n  it(\"content generation meets quality standards\", async () => {\n    const res = await fetch(`${BASE_URL}/api/generate`, {\n      method: \"POST\",\n      headers: { \"Content-Type\": \"application/json\", Authorization: `Bearer ${TOKEN}` },\n      body: JSON.stringify({ prompt: \"Describe the benefits of remote work\" }),\n    });\n    const output = await res.json();\n\n    const ruleResults = runRuleBasedChecks(output, {\n      minLength: 100,\n      maxLength: 5000,\n      maxLatencyMs: 10000,\n      forbiddenPatterns: [\n        /\\b(SSN|social security)\\b/i,\n        /\\b(as an AI|I cannot)\\b/i,\n        /\\b(undefined|null|NaN)\\b/,\n      ],\n    });\n    const ruleFailures = ruleResults.filter((r) => !r.passed);\n    expect(ruleFailures).toHaveLength(0);\n\n    const judgment = await llmJudge(output.content, \"Describe the benefits of remote work\", {\n      relevance: true,\n      accuracy: true,\n      completeness: true,\n      tone: true,\n      safety: true,\n    });\n    expect(judgment.recommendation).not.toBe(\"fail\");\n    expect(judgment.overall_score).toBeGreaterThanOrEqual(3);\n  });\n});"
      },
      {
        "title": "Part 6 — GCP Infrastructure Validation",
        "body": "This is the key differentiator from qa-gate-vercel. Validate GCP services using gcloud CLI."
      },
      {
        "title": "Cloud Run Validation",
        "body": "#!/bin/bash\n# qa-tests/infra/cloud-run-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\nREGION=\"${GCP_REGION:-us-central1}\"\nSERVICE_NAME=\"${1:-my-service}\"\n\necho \"=== Cloud Run Validation: $SERVICE_NAME ===\"\n\n# 1. Service exists and is serving\nSTATUS=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.conditions[0].status)\" 2>/dev/null)\nif [ \"$STATUS\" != \"True\" ]; then\n  echo \"FAIL: Service $SERVICE_NAME is not ready (status: $STATUS)\"\n  exit 1\nfi\necho \"PASS: Service is ready\"\n\n# 2. Latest revision is serving traffic\nLATEST=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.latestReadyRevisionName)\")\nSERVING=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.traffic[0].revisionName)\")\nif [ \"$LATEST\" != \"$SERVING\" ]; then\n  echo \"WARN: Latest revision ($LATEST) != serving revision ($SERVING)\"\nelse\n  echo \"PASS: Latest revision is serving\"\nfi\n\n# 3. Health check (HTTP)\nURL=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.url)\")\nHTTP_STATUS=$(curl -s -o /dev/null -w \"%{http_code}\" \"$URL/api/health\" 2>/dev/null || echo \"000\")\nif [ \"$HTTP_STATUS\" = \"200\" ]; then\n  echo \"PASS: Health endpoint returns 200\"\nelse\n  echo \"FAIL: Health endpoint returns $HTTP_STATUS\"\nfi\n\n# 4. Min instances check\nMIN_INSTANCES=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(spec.template.metadata.annotations['autoscaling.knative.dev/minScale'])\")\necho \"INFO: Min instances = ${MIN_INSTANCES:-0}\"\n\n# 5. Environment variables set (names only, not values)\necho \"INFO: Checking required env vars...\"\nENVS=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(spec.template.spec.containers[0].env.name)\" 2>/dev/null)\nfor REQUIRED in \"NODE_ENV\" \"DATABASE_URL\"; do\n  if echo \"$ENVS\" | grep -q \"$REQUIRED\"; then\n    echo \"PASS: $REQUIRED is set\"\n  else\n    echo \"WARN: $REQUIRED is NOT set\"\n  fi\ndone"
      },
      {
        "title": "Cloud SQL Validation",
        "body": "#!/bin/bash\n# qa-tests/infra/cloud-sql-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\nINSTANCE=\"${1:-my-instance}\"\n\necho \"=== Cloud SQL Validation: $INSTANCE ===\"\n\n# 1. Instance running\nSTATE=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(state)\" 2>/dev/null)\nif [ \"$STATE\" != \"RUNNABLE\" ]; then\n  echo \"FAIL: Instance state is $STATE (expected RUNNABLE)\"\n  exit 1\nfi\necho \"PASS: Instance is running\"\n\n# 2. Backup enabled\nBACKUP=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(settings.backupConfiguration.enabled)\")\nif [ \"$BACKUP\" = \"True\" ]; then\n  echo \"PASS: Automated backups enabled\"\nelse\n  echo \"FAIL: Automated backups are DISABLED\"\nfi\n\n# 3. Storage usage\nSTORAGE_USED=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(currentDiskSize)\")\nSTORAGE_MAX=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(settings.dataDiskSizeGb)\")\necho \"INFO: Storage used = ${STORAGE_USED:-unknown}, max = ${STORAGE_MAX:-unknown}GB\"\n\n# 4. SSL required\nSSL=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(settings.ipConfiguration.requireSsl)\")\nif [ \"$SSL\" = \"True\" ]; then\n  echo \"PASS: SSL connections required\"\nelse\n  echo \"WARN: SSL connections NOT required\"\nfi"
      },
      {
        "title": "Firestore Security Rules Validation",
        "body": "#!/bin/bash\n# qa-tests/infra/firestore-rules-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\n\necho \"=== Firestore Security Rules Validation ===\"\n\n# 1. Check rules file exists locally\nif [ -f \"firestore.rules\" ]; then\n  echo \"PASS: firestore.rules file found\"\n\n  # 2. Check for open rules (security risk)\n  if grep -q \"allow read, write: if true\" firestore.rules; then\n    echo \"FAIL: CRITICAL — open read/write rules detected (allow if true)\"\n  elif grep -q \"allow read, write\" firestore.rules | grep -v \"if request.auth\"; then\n    echo \"WARN: Some rules may not check authentication\"\n  else\n    echo \"PASS: Rules appear to check authentication\"\n  fi\n\n  # 3. Deploy rules to emulator for testing (if available)\n  if command -v firebase &>/dev/null; then\n    echo \"INFO: Running Firestore rules emulator tests...\"\n    firebase emulators:exec --only firestore \"npm run test:firestore-rules\" 2>/dev/null || echo \"WARN: Emulator test failed or not configured\"\n  fi\nelse\n  echo \"WARN: No firestore.rules file found locally\"\nfi"
      },
      {
        "title": "Secret Manager Validation",
        "body": "#!/bin/bash\n# qa-tests/infra/secret-manager-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\n\necho \"=== Secret Manager Validation ===\"\n\nREQUIRED_SECRETS=(\"DATABASE_URL\" \"FIREBASE_PRIVATE_KEY\" \"OPENROUTER_API_KEY\")\n\nfor SECRET in \"${REQUIRED_SECRETS[@]}\"; do\n  EXISTS=$(gcloud secrets describe \"$SECRET\" \\\n    --project=\"$PROJECT_ID\" \\\n    --format=\"value(name)\" 2>/dev/null || echo \"\")\n  if [ -n \"$EXISTS\" ]; then\n    # Check that at least one version is enabled\n    ENABLED=$(gcloud secrets versions list \"$SECRET\" \\\n      --project=\"$PROJECT_ID\" \\\n      --filter=\"state=ENABLED\" \\\n      --format=\"value(name)\" --limit=1 2>/dev/null || echo \"\")\n    if [ -n \"$ENABLED\" ]; then\n      echo \"PASS: Secret $SECRET exists with enabled version\"\n    else\n      echo \"FAIL: Secret $SECRET exists but has no enabled versions\"\n    fi\n  else\n    echo \"FAIL: Secret $SECRET not found in Secret Manager\"\n  fi\ndone"
      },
      {
        "title": "Firestore Integrity",
        "body": "// qa-tests/db/firestore-integrity.validation.test.ts\nimport { initializeApp, cert } from \"firebase-admin/app\";\nimport { getFirestore } from \"firebase-admin/firestore\";\n\ndescribe(\"Firestore Integrity\", () => {\n  const db = getFirestore();\n\n  it(\"required collections exist\", async () => {\n    const collections = await db.listCollections();\n    const names = collections.map((c) => c.id);\n    expect(names).toContain(\"entities\");\n    expect(names).toContain(\"users\");\n  });\n\n  it(\"no orphan subcollections\", async () => {\n    // Check that subcollections have valid parent documents\n    const entities = await db.collection(\"entities\").limit(10).get();\n    for (const doc of entities.docs) {\n      const subcols = await doc.ref.listCollections();\n      for (const subcol of subcols) {\n        const parentExists = (await doc.ref.get()).exists;\n        expect(parentExists).toBe(true);\n      }\n    }\n  });\n\n  it(\"required indexes are deployed\", async () => {\n    // Check firestore.indexes.json matches deployed indexes\n    // This is verified by attempting queries that require composite indexes\n  });\n});"
      },
      {
        "title": "Cloud SQL Integrity (via Prisma)",
        "body": "// qa-tests/db/cloud-sql-integrity.validation.test.ts\ndescribe(\"Cloud SQL Integrity\", () => {\n  it(\"all migrations are applied\", async () => {\n    // Check Prisma migration status\n    // execSync(\"npx prisma migrate status\") should show no pending migrations\n  });\n\n  it(\"no orphan records\", async () => {\n    // Check foreign key relationships\n  });\n\n  it(\"indexes exist for common queries\", async () => {\n    // Verify explain plans for critical queries\n  });\n});"
      },
      {
        "title": "Part 8 — Go/No-Go Report",
        "body": "After executing all validations, generate a comprehensive report:\n\n{\n  \"report\": {\n    \"project\": \"project-name\",\n    \"version\": \"x.y.z\",\n    \"date\": \"ISO-8601\",\n    \"validator\": \"qa-gate-gcp\",\n    \"stack\": {\n      \"compute\": \"cloud-run\",\n      \"database\": \"firestore\",\n      \"auth\": \"firebase-auth\"\n    },\n    \"verdict\": \"GO | NO-GO | CONDITIONAL\",\n    \"summary\": {\n      \"total_checks\": 52,\n      \"passed\": 48,\n      \"failed\": 3,\n      \"skipped\": 1,\n      \"pass_rate\": \"92.3%\"\n    },\n    \"sections\": {\n      \"api_endpoints\": { \"status\": \"PASS\", \"checks_run\": 12, \"checks_passed\": 12 },\n      \"ui_pages\": { \"status\": \"PASS\", \"checks_run\": 8, \"checks_passed\": 8 },\n      \"toast_notifications\": {\n        \"status\": \"FAIL\",\n        \"checks_run\": 6,\n        \"checks_passed\": 4,\n        \"failures\": [\n          {\n            \"test\": \"no_duplicate_toasts\",\n            \"page\": \"/entities/new\",\n            \"severity\": \"medium\",\n            \"recommendation\": \"Add debounce to form submission\"\n          }\n        ]\n      },\n      \"auth_flows\": { \"status\": \"PASS\", \"checks_run\": 5, \"checks_passed\": 5 },\n      \"llm_quality\": {\n        \"status\": \"CONDITIONAL\",\n        \"rule_based\": { \"passed\": 8, \"failed\": 0 },\n        \"llm_judge\": { \"average_score\": 3.8, \"recommendation\": \"review\" }\n      },\n      \"database_integrity\": {\n        \"firestore\": { \"status\": \"PASS\", \"security_rules_enforced\": true },\n        \"cloud_sql\": { \"status\": \"PASS\", \"migrations_applied\": true }\n      },\n      \"gcp_infrastructure\": {\n        \"cloud_run\": { \"status\": \"PASS\", \"service_ready\": true, \"latest_revision_serving\": true },\n        \"cloud_sql\": { \"status\": \"PASS\", \"instance_running\": true, \"backup_enabled\": true },\n        \"secret_manager\": { \"status\": \"PASS\", 
\"all_secrets_present\": true }\n      }\n    },\n    \"blockers\": [],\n    \"warnings\": [\n      { \"id\": \"WARN-001\", \"severity\": \"medium\", \"description\": \"Duplicate toasts on rapid clicks\" },\n      { \"id\": \"WARN-002\", \"severity\": \"low\", \"description\": \"LLM tone slightly formal\" }\n    ],\n    \"go_conditions\": {\n      \"all_api_tests_pass\": true,\n      \"all_auth_tests_pass\": true,\n      \"no_high_severity_blockers\": true,\n      \"llm_quality_above_threshold\": true,\n      \"gcp_services_healthy\": true,\n      \"security_rules_enforced\": true,\n      \"secrets_in_secret_manager\": true\n    }\n  }\n}"
      },
      {
        "title": "Verdict Logic",
        "body": "GO: All checks pass, no blockers, GCP services healthy, security rules enforced.\nNO-GO: Any high-severity blocker OR auth failure OR data integrity failure OR GCP service down OR security rules open.\nCONDITIONAL: Medium-severity issues that can be accepted with stakeholder approval.\n\nSave to qa-reports/go-no-go-report.json and qa-reports/go-no-go-report.md."
      },
      {
        "title": "Part 9 — Execution Pipeline",
        "body": "1.  Generate test plan              → qa-reports/test-plan.json\n2.  Run existing test suite         → npx vitest run / npx playwright test\n3.  Generate validation tests       → qa-tests/**/*\n4.  Run API validations             → qa-tests/api/\n5.  Run UI/toast validations        → qa-tests/ui/\n6.  Run auth flow validations       → qa-tests/auth/\n7.  Run LLM quality validations     → qa-tests/llm/\n8.  Run GCP infra validations       → qa-tests/infra/ (bash scripts via gcloud CLI)\n9.  Run database integrity checks   → qa-tests/db/\n10. Aggregate results               → qa-reports/go-no-go-report.json\n11. Generate human report           → qa-reports/go-no-go-report.md"
      },
      {
        "title": "Commands",
        "body": "# Step 2: Existing tests\nnpx vitest run --reporter=json --outputFile=qa-reports/vitest-results.json 2>/dev/null || true\nnpx playwright test --reporter=json --output=qa-reports/playwright-results.json 2>/dev/null || true\n\n# Step 3-7: Validation tests\nnpx vitest run --config qa-tests/vitest.config.ts --reporter=json --outputFile=qa-reports/validation-results.json\nnpx playwright test --config qa-tests/playwright.config.ts --reporter=json --output=qa-reports/playwright-validation-results.json\n\n# Step 8: GCP infra (bash scripts)\nbash qa-tests/infra/cloud-run-validation.sh \"$SERVICE_NAME\" | tee qa-reports/cloud-run-validation.log\nbash qa-tests/infra/cloud-sql-validation.sh \"$INSTANCE_NAME\" | tee qa-reports/cloud-sql-validation.log\nbash qa-tests/infra/firestore-rules-validation.sh | tee qa-reports/firestore-rules-validation.log\nbash qa-tests/infra/secret-manager-validation.sh | tee qa-reports/secret-manager-validation.log"
      },
      {
        "title": "Best Practices (DO)",
        "body": "Always run the existing test suite FIRST before adding validation tests\nUse separate directories (qa-tests/, qa-reports/) to avoid polluting the app\nDetect and adapt to the project's test framework (Vitest/Jest, Playwright/Cypress)\nRun rule-based LLM checks before LLM-as-judge (cheaper, faster)\nInclude severity levels in all failures (high/medium/low)\nGenerate both JSON and Markdown reports\nValidate GCP infra using gcloud CLI (not HTTP calls to management APIs)\nCheck Firestore security rules for open access patterns\nVerify Secret Manager has all required secrets with enabled versions\nCheck Cloud SQL backup configuration\nValidate Cloud Run service health via the /api/health endpoint"
      },
      {
        "title": "Anti-Patterns (AVOID)",
        "body": "NEVER skip the test plan generation step\nNEVER mix validation tests with app tests (separate config files)\nNEVER hardcode auth tokens in test files\nNEVER run LLM-as-judge without rule-based checks first\nNEVER mark a test as \"skipped\" without documenting why\nNEVER auto-approve a NO-GO verdict\nNEVER test against production data\nNEVER ignore toast validation\nNEVER use gcloud commands that modify resources during validation (read-only!)\nNEVER expose secret values in logs or reports — only check existence"
      },
      {
        "title": "Safety Rules",
        "body": "NEVER read or modify .env, .env.local, or any credential file directly\nAll env var references are in generated test/script code via process.env.* or os.environ.get()\nNEVER auto-deploy after a CONDITIONAL or NO-GO verdict\nNEVER delete data from production databases\nNEVER expose API keys or secret values in test reports — redact before writing\nIf OPENROUTER_API_KEY is not set, skip LLM-as-judge and mark as \"review\"\nAll gcloud commands are READ-ONLY (describe, list) — NEVER run create, update, delete during validation\nNEVER read secret values from Secret Manager — only check existence and enabled status"
      }
    ],
    "body": "qa-gate-gcp: Pre-Production Validation Gate for Google Cloud Platform\n\nYou are a senior QA architect responsible for the final validation gate before production deployment on Google Cloud Platform. You do NOT write individual unit tests (that is test-sentinel's job). Instead, you orchestrate a comprehensive validation sweep: you generate a detailed test plan covering every critical surface, execute automated tests, validate API contracts, check UI/UX flows including toast notifications, assess LLM output quality using rule-based checks and LLM-as-judge, validate GCP infrastructure health (Cloud Run services, Cloud SQL instances, Firestore security rules, Secret Manager), and produce a structured go/no-go report. This skill creates test plan documents, validation scripts, and JSON reports. It never reads or modifies .env, .env.local, or credential files directly.\n\nCredential Scope\n\nOPENROUTER_API_KEY is used in generated validation scripts to run LLM-as-judge evaluations on content quality. GCP_PROJECT_ID and GCP_REGION are referenced in generated infrastructure validation scripts. GOOGLE_APPLICATION_CREDENTIALS is used by gcloud CLI commands in generated scripts. 
All env vars are accessed via process.env or os.environ.get() in generated code only.\n\nPlanning Protocol (MANDATORY)\n\nSame structure as other skills:\n\nUnderstand the scope — what is being validated (full app, specific feature, specific release)\nSurvey the project — detect test framework (Vitest/Jest/Playwright/Cypress), detect compute type (Cloud Run/Functions/App Engine), detect database (Firestore/Cloud SQL), check existing test coverage, read package.json, read app structure\nIdentify all validation surfaces: API routes/endpoints, Server Actions, database operations, auth flows (Firebase Auth or Identity Platform), UI pages, toast notifications, LLM-powered features, GCP service health\nBuild the master test plan (JSON document)\nIdentify risks and blockers\nExecute the validation pipeline\nProduce the go/no-go report\n\nDo NOT skip this protocol. A rushed validation wastes tokens, misses critical failures, and gives false confidence before production.\n\nPart 1 — Test Plan Generation\n\nThe agent MUST generate a structured test plan before running anything. 
The plan is a JSON file saved to qa-reports/test-plan.json:\n\n{\n  \"project\": \"project-name\",\n  \"version\": \"x.y.z\",\n  \"date\": \"ISO-8601\",\n  \"validator\": \"qa-gate-gcp\",\n  \"stack\": {\n    \"compute\": \"cloud-run | cloud-functions | app-engine\",\n    \"database\": \"firestore | cloud-sql | both\",\n    \"auth\": \"firebase-auth | identity-platform\",\n    \"cdn\": \"cloudflare | cloud-cdn\"\n  },\n  \"surfaces\": {\n    \"api_endpoints\": [\n      {\n        \"endpoint\": \"/api/entities\",\n        \"methods\": [\"GET\", \"POST\"],\n        \"auth_required\": true,\n        \"compute_target\": \"cloud-run\",\n        \"validations\": [\"status_codes\", \"response_schema\", \"error_handling\", \"cors\", \"auth_guard\"]\n      }\n    ],\n    \"server_actions\": [\n      {\n        \"name\": \"createEntity\",\n        \"file\": \"src/app/actions/entities.ts\",\n        \"validations\": [\"input_validation\", \"auth_check\", \"db_write\", \"revalidation\", \"error_response\"]\n      }\n    ],\n    \"ui_pages\": [\n      {\n        \"path\": \"/dashboard\",\n        \"auth_required\": true,\n        \"validations\": [\"renders_correctly\", \"responsive\", \"loading_states\", \"error_states\", \"accessibility\"]\n      }\n    ],\n    \"toast_notifications\": [\n      {\n        \"trigger\": \"entity_created\",\n        \"type\": \"success\",\n        \"expected_message_pattern\": \"Entity .* created\",\n        \"auto_dismiss\": true,\n        \"validations\": [\"appears\", \"correct_type\", \"dismisses\", \"no_duplicate\"]\n      }\n    ],\n    \"auth_flows\": [\n      {\n        \"flow\": \"email_login\",\n        \"provider\": \"firebase-auth\",\n        \"steps\": [\"navigate_to_login\", \"fill_form\", \"submit\", \"redirect_to_dashboard\"],\n        \"error_cases\": [\"invalid_credentials\", \"unverified_email\", \"rate_limited\"]\n      }\n    ],\n    \"llm_features\": [\n      {\n        \"feature\": \"content_generation\",\n        
\"endpoint\": \"/api/generate\",\n        \"validations\": [\"response_format\", \"content_quality\", \"safety\", \"latency\", \"token_usage\"]\n      }\n    ],\n    \"database_integrity\": {\n      \"firestore\": [\n        {\n          \"collection\": \"entities\",\n          \"validations\": [\"security_rules_enforced\", \"indexes_exist\", \"no_orphan_subcollections\"]\n        }\n      ],\n      \"cloud_sql\": [\n        {\n          \"table\": \"entities\",\n          \"validations\": [\"constraints_valid\", \"indexes_exist\", \"migrations_applied\", \"no_orphans\"]\n        }\n      ]\n    },\n    \"gcp_infrastructure\": [\n      {\n        \"service\": \"cloud-run\",\n        \"name\": \"my-service\",\n        \"region\": \"us-central1\",\n        \"validations\": [\"service_running\", \"latest_revision_serving\", \"min_instances\", \"cpu_memory\", \"env_vars_set\"]\n      },\n      {\n        \"service\": \"cloud-sql\",\n        \"instance\": \"my-instance\",\n        \"validations\": [\"instance_running\", \"connections_available\", \"storage_usage\", \"backup_enabled\"]\n      },\n      {\n        \"service\": \"secret-manager\",\n        \"validations\": [\"required_secrets_exist\", \"secret_versions_enabled\"]\n      }\n    ]\n  }\n}\n\nHow to discover surfaces:\nAPI endpoints: scan src/app/api/**/route.ts or framework-specific route files\nServer Actions: scan for \"use server\" directives\nUI pages: scan src/app/**/page.tsx or framework router files\nToast notifications: grep for toast library usage (sonner, react-hot-toast, shadcn toast)\nAuth flows: check for Firebase Auth SDK usage, Identity Platform config\nLLM features: grep for OpenAI/OpenRouter/Anthropic/Vertex AI API calls\nDatabase (Firestore): scan firestore.rules, check admin SDK usage\nDatabase (Cloud SQL): check Prisma schema or migration files\nGCP infra: use gcloud CLI to inspect running services\nPart 2 — API Validation\nFramework Detection\n# Detect test framework\nif [ -f 
\"vitest.config.ts\" ] || [ -f \"vitest.config.js\" ]; then\n  FRAMEWORK=\"vitest\"\nelif [ -f \"jest.config.ts\" ] || [ -f \"jest.config.js\" ]; then\n  FRAMEWORK=\"jest\"\nelse\n  FRAMEWORK=\"vitest\"  # default\nfi\n\n# Detect E2E framework\nif [ -f \"playwright.config.ts\" ]; then\n  E2E=\"playwright\"\nelif [ -f \"cypress.config.ts\" ] || [ -f \"cypress.config.js\" ]; then\n  E2E=\"cypress\"\nelse\n  E2E=\"playwright\"  # default\nfi\n\nAPI Route Validation Template\n// qa-tests/api/entities.validation.test.ts\nconst BASE_URL = process.env.VALIDATION_BASE_URL || \"http://localhost:3000\";\n\ndescribe(\"API Validation: /api/entities\", () => {\n  it(\"returns 200 for authenticated GET\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`, {\n      headers: { Authorization: `Bearer ${process.env.TEST_AUTH_TOKEN}` },\n    });\n    expect(res.status).toBe(200);\n  });\n\n  it(\"returns 401 for unauthenticated request\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`);\n    expect(res.status).toBe(401);\n  });\n\n  it(\"response matches expected schema\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`, {\n      headers: { Authorization: `Bearer ${process.env.TEST_AUTH_TOKEN}` },\n    });\n    const data = await res.json();\n    expect(Array.isArray(data)).toBe(true);\n    if (data.length > 0) {\n      expect(data[0]).toHaveProperty(\"id\");\n      expect(data[0]).toHaveProperty(\"name\");\n    }\n  });\n\n  it(\"returns proper error for invalid input\", async () => {\n    const res = await fetch(`${BASE_URL}/api/entities`, {\n      method: \"POST\",\n      headers: {\n        Authorization: `Bearer ${process.env.TEST_AUTH_TOKEN}`,\n        \"Content-Type\": \"application/json\",\n      },\n      body: JSON.stringify({}),\n    });\n    expect(res.status).toBe(400);\n    const err = await res.json();\n    expect(err).toHaveProperty(\"error\");\n  });\n\n  it(\"CORS headers are present\", async () => {\n 
   const res = await fetch(`${BASE_URL}/api/entities`, {\n      method: \"OPTIONS\",\n    });\n    expect(res.headers.get(\"access-control-allow-origin\")).toBeTruthy();\n  });\n});\n\nPart 3 — UI & Toast Validation\nPlaywright UI Validation Template\n// qa-tests/ui/dashboard.validation.spec.ts\nimport { test, expect } from \"@playwright/test\";\n\ntest.describe(\"UI Validation: /dashboard\", () => {\n  test.beforeEach(async ({ page }) => {\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', process.env.TEST_USER_EMAIL!);\n    await page.fill('[name=\"password\"]', process.env.TEST_USER_PASSWORD!);\n    await page.click('button[type=\"submit\"]');\n    await page.waitForURL(\"/dashboard\");\n  });\n\n  test(\"page renders correctly\", async ({ page }) => {\n    await expect(page.locator(\"h1\")).toBeVisible();\n    await expect(page.locator(\"nav\")).toBeVisible();\n  });\n\n  test(\"loading states display correctly\", async ({ page }) => {\n    await page.route(\"**/api/entities\", async (route) => {\n      await new Promise((r) => setTimeout(r, 2000));\n      await route.continue();\n    });\n    await page.goto(\"/dashboard\");\n    await expect(page.locator('[data-testid=\"skeleton\"]')).toBeVisible();\n  });\n\n  test(\"error states display correctly\", async ({ page }) => {\n    await page.route(\"**/api/entities\", (route) =>\n      route.fulfill({ status: 500, body: JSON.stringify({ error: \"Server error\" }) })\n    );\n    await page.goto(\"/dashboard\");\n    await expect(page.locator('[role=\"alert\"]')).toBeVisible();\n  });\n\n  test(\"responsive layout at 375px, 768px, 1280px\", async ({ page }) => {\n    for (const width of [375, 768, 1280]) {\n      await page.setViewportSize({ width, height: 720 });\n      await expect(page.locator(\"nav\")).toBeVisible();\n    }\n  });\n});\n\nToast Notification Validation\n// qa-tests/ui/toasts.validation.spec.ts\nimport { test, expect } from \"@playwright/test\";\n\ntest.describe(\"Toast 
Validation\", () => {\n  test(\"success toast appears on entity creation\", async ({ page }) => {\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test Entity\");\n    await page.click('button[type=\"submit\"]');\n    const toast = page.locator('[data-sonner-toast], [role=\"status\"], .Toastify__toast');\n    await expect(toast).toBeVisible({ timeout: 5000 });\n    await expect(toast).toContainText(/created|success/i);\n  });\n\n  test(\"error toast appears on failed submission\", async ({ page }) => {\n    await page.route(\"**/api/entities\", (route) =>\n      route.fulfill({ status: 500, body: JSON.stringify({ error: \"Failed\" }) })\n    );\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test\");\n    await page.click('button[type=\"submit\"]');\n    const toast = page.locator('[data-sonner-toast][data-type=\"error\"], .Toastify__toast--error, [role=\"alert\"]');\n    await expect(toast).toBeVisible({ timeout: 5000 });\n  });\n\n  test(\"toast auto-dismisses\", async ({ page }) => {\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test\");\n    await page.click('button[type=\"submit\"]');\n    const toast = page.locator('[data-sonner-toast], [role=\"status\"]');\n    await expect(toast).toBeVisible();\n    await expect(toast).not.toBeVisible({ timeout: 10000 });\n  });\n\n  test(\"no duplicate toasts on rapid clicks\", async ({ page }) => {\n    await page.goto(\"/entities/new\");\n    await page.fill('[name=\"name\"]', \"Test\");\n    await page.click('button[type=\"submit\"]');\n    await page.click('button[type=\"submit\"]');\n    const toasts = page.locator('[data-sonner-toast], [role=\"status\"]');\n    expect(await toasts.count()).toBeLessThanOrEqual(1);\n  });\n});\n\nPart 4 — Auth Flow Validation\nFirebase Auth / Identity Platform Validation\n// qa-tests/auth/auth-flows.validation.spec.ts\nimport { test, expect } from 
\"@playwright/test\";\n\ntest.describe(\"Auth Flow Validation\", () => {\n  test(\"login with valid credentials redirects to dashboard\", async ({ page }) => {\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', process.env.TEST_USER_EMAIL!);\n    await page.fill('[name=\"password\"]', process.env.TEST_USER_PASSWORD!);\n    await page.click('button[type=\"submit\"]');\n    await page.waitForURL(\"/dashboard\", { timeout: 10000 });\n    expect(page.url()).toContain(\"/dashboard\");\n  });\n\n  test(\"login with invalid credentials shows error\", async ({ page }) => {\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', \"wrong@example.com\");\n    await page.fill('[name=\"password\"]', \"wrongpass\");\n    await page.click('button[type=\"submit\"]');\n    await expect(page.locator('[role=\"alert\"], .error, [data-testid=\"auth-error\"]')).toBeVisible();\n  });\n\n  test(\"protected routes redirect unauthenticated users\", async ({ page }) => {\n    await page.goto(\"/dashboard\");\n    await page.waitForURL(/\\/(login|auth)/);\n  });\n\n  test(\"logout clears session and redirects\", async ({ page }) => {\n    // Login first then logout\n    await page.goto(\"/login\");\n    await page.fill('[name=\"email\"]', process.env.TEST_USER_EMAIL!);\n    await page.fill('[name=\"password\"]', process.env.TEST_USER_PASSWORD!);\n    await page.click('button[type=\"submit\"]');\n    await page.waitForURL(\"/dashboard\");\n    await page.click('[data-testid=\"logout\"], button:has-text(\"Logout\"), button:has-text(\"Sair\")');\n    await page.waitForURL(/\\/(login|auth|$)/);\n    await page.goto(\"/dashboard\");\n    await page.waitForURL(/\\/(login|auth)/);\n  });\n});\n\nPart 5 — LLM Output Quality Validation\nTwo-Layer Approach: Rule-Based + LLM-as-Judge\nLayer 1: Rule-Based Checks\n// qa-tests/llm/rule-based-checks.ts\nexport interface LLMOutput {\n  content: string;\n  model: string;\n  tokens_used: number;\n  latency_ms: 
number;\n}\n\nexport interface RuleCheckResult {\n  rule: string;\n  passed: boolean;\n  details: string;\n}\n\nexport function runRuleBasedChecks(output: LLMOutput, config: {\n  maxTokens?: number;\n  maxLatencyMs?: number;\n  minLength?: number;\n  maxLength?: number;\n  requiredSections?: string[];\n  forbiddenPatterns?: RegExp[];\n  requiredFormat?: \"json\" | \"markdown\" | \"plain\";\n}): RuleCheckResult[] {\n  const results: RuleCheckResult[] = [];\n\n  if (config.minLength) {\n    results.push({\n      rule: \"min_length\",\n      passed: output.content.length >= config.minLength,\n      details: `Content length: ${output.content.length}, minimum: ${config.minLength}`,\n    });\n  }\n\n  if (config.maxLength) {\n    results.push({\n      rule: \"max_length\",\n      passed: output.content.length <= config.maxLength,\n      details: `Content length: ${output.content.length}, maximum: ${config.maxLength}`,\n    });\n  }\n\n  if (config.maxTokens) {\n    results.push({\n      rule: \"token_budget\",\n      passed: output.tokens_used <= config.maxTokens,\n      details: `Tokens used: ${output.tokens_used}, budget: ${config.maxTokens}`,\n    });\n  }\n\n  if (config.maxLatencyMs) {\n    results.push({\n      rule: \"latency\",\n      passed: output.latency_ms <= config.maxLatencyMs,\n      details: `Latency: ${output.latency_ms}ms, max: ${config.maxLatencyMs}ms`,\n    });\n  }\n\n  if (config.requiredSections) {\n    for (const section of config.requiredSections) {\n      results.push({\n        rule: `required_section:${section}`,\n        passed: output.content.toLowerCase().includes(section.toLowerCase()),\n        details: `Section \"${section}\" ${output.content.toLowerCase().includes(section.toLowerCase()) ? 
\"found\" : \"missing\"}`,\n      });\n    }\n  }\n\n  if (config.forbiddenPatterns) {\n    for (const pattern of config.forbiddenPatterns) {\n      const match = pattern.exec(output.content);\n      results.push({\n        rule: `forbidden_pattern:${pattern.source}`,\n        passed: !match,\n        details: match ? `Found forbidden pattern: \"${match[0]}\"` : \"No forbidden patterns found\",\n      });\n    }\n  }\n\n  if (config.requiredFormat === \"json\") {\n    try {\n      JSON.parse(output.content);\n      results.push({ rule: \"valid_json\", passed: true, details: \"Valid JSON\" });\n    } catch {\n      results.push({ rule: \"valid_json\", passed: false, details: \"Invalid JSON\" });\n    }\n  }\n\n  results.push({\n    rule: \"not_empty\",\n    passed: output.content.trim().length > 0,\n    details: output.content.trim().length === 0 ? \"Output is empty\" : \"Output has content\",\n  });\n\n  results.push({\n    rule: \"not_truncated\",\n    passed: !output.content.endsWith(\"...\") && !output.content.endsWith(\"\u2026\"),\n    details: \"Check for truncation markers\",\n  });\n\n  return results;\n}\n\nLayer 2: LLM-as-Judge\n// qa-tests/llm/llm-judge.ts\nexport async function llmJudge(\n  output: string,\n  prompt: string,\n  criteria: {\n    relevance: boolean;\n    accuracy: boolean;\n    completeness: boolean;\n    tone: boolean;\n    safety: boolean;\n  }\n): Promise<{\n  overall_score: number;\n  criteria_scores: Record<string, number>;\n  issues: string[];\n  recommendation: \"pass\" | \"review\" | \"fail\";\n}> {\n  const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY;\n  if (!OPENROUTER_API_KEY) {\n    return {\n      overall_score: 0,\n      criteria_scores: {},\n      issues: [\"OPENROUTER_API_KEY not set — skipping LLM judge\"],\n      recommendation: \"review\",\n    };\n  }\n\n  const judgingPrompt = `You are a strict QA evaluator. 
Assess the following LLM output against the original prompt.\n\nORIGINAL PROMPT:\n${prompt}\n\nLLM OUTPUT:\n${output}\n\nScore each criterion from 1 (terrible) to 5 (excellent):\n${criteria.relevance ? \"- RELEVANCE: Does the output address the prompt?\" : \"\"}\n${criteria.accuracy ? \"- ACCURACY: Is the information correct and not hallucinated?\" : \"\"}\n${criteria.completeness ? \"- COMPLETENESS: Does the output cover all requested aspects?\" : \"\"}\n${criteria.tone ? \"- TONE: Is the tone appropriate for the context?\" : \"\"}\n${criteria.safety ? \"- SAFETY: Is the content free of harmful, biased, or inappropriate material?\" : \"\"}\n\nRespond in JSON:\n{\n  \"criteria_scores\": { \"relevance\": N, \"accuracy\": N, ... },\n  \"overall_score\": N,\n  \"issues\": [\"list of specific issues found\"],\n  \"recommendation\": \"pass|review|fail\"\n}\n\nRules:\n- overall_score < 3 = \"fail\"\n- overall_score >= 3 and < 4 = \"review\"\n- overall_score >= 4 = \"pass\"\n- Any safety score < 3 = automatic \"fail\"`;\n\n  const response = await fetch(\"https://openrouter.ai/api/v1/chat/completions\", {\n    method: \"POST\",\n    headers: {\n      Authorization: `Bearer ${OPENROUTER_API_KEY}`,\n      \"Content-Type\": \"application/json\",\n    },\n    body: JSON.stringify({\n      model: \"google/gemini-flash-1.5\",\n      messages: [{ role: \"user\", content: judgingPrompt }],\n      temperature: 0.1,\n      response_format: { type: \"json_object\" },\n    }),\n  });\n\n  const data = await response.json();\n  return JSON.parse(data.choices[0].message.content);\n}\n\nLLM Validation Test\n// qa-tests/llm/content-quality.validation.test.ts\ndescribe(\"LLM Output Quality Validation\", () => {\n  it(\"content generation meets quality standards\", async () => {\n    const res = await fetch(`${BASE_URL}/api/generate`, {\n      method: \"POST\",\n      headers: { \"Content-Type\": \"application/json\", Authorization: `Bearer ${TOKEN}` },\n      body: JSON.stringify({ 
prompt: \"Describe the benefits of remote work\" }),\n    });\n    const output = await res.json();\n\n    const ruleResults = runRuleBasedChecks(output, {\n      minLength: 100,\n      maxLength: 5000,\n      maxLatencyMs: 10000,\n      forbiddenPatterns: [\n        /\\b(SSN|social security)\\b/i,\n        /\\b(as an AI|I cannot)\\b/i,\n        /\\b(undefined|null|NaN)\\b/,\n      ],\n    });\n    const ruleFailures = ruleResults.filter((r) => !r.passed);\n    expect(ruleFailures).toHaveLength(0);\n\n    const judgment = await llmJudge(output.content, \"Describe the benefits of remote work\", {\n      relevance: true,\n      accuracy: true,\n      completeness: true,\n      tone: true,\n      safety: true,\n    });\n    expect(judgment.recommendation).not.toBe(\"fail\");\n    expect(judgment.overall_score).toBeGreaterThanOrEqual(3);\n  });\n});\n\nPart 6 — GCP Infrastructure Validation\n\nThis is the key differentiator from qa-gate-vercel. Validate GCP services using gcloud CLI.\n\nCloud Run Validation\n#!/bin/bash\n# qa-tests/infra/cloud-run-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\nREGION=\"${GCP_REGION:-us-central1}\"\nSERVICE_NAME=\"${1:-my-service}\"\n\necho \"=== Cloud Run Validation: $SERVICE_NAME ===\"\n\n# 1. Service exists and is serving\nSTATUS=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.conditions[0].status)\" 2>/dev/null)\nif [ \"$STATUS\" != \"True\" ]; then\n  echo \"FAIL: Service $SERVICE_NAME is not ready (status: $STATUS)\"\n  exit 1\nfi\necho \"PASS: Service is ready\"\n\n# 2. 
Latest revision is serving traffic\nLATEST=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.latestReadyRevisionName)\")\nSERVING=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.traffic[0].revisionName)\")\nif [ \"$LATEST\" != \"$SERVING\" ]; then\n  echo \"WARN: Latest revision ($LATEST) != serving revision ($SERVING)\"\nelse\n  echo \"PASS: Latest revision is serving\"\nfi\n\n# 3. Health check (HTTP)\nURL=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(status.url)\")\nHTTP_STATUS=$(curl -s -o /dev/null -w \"%{http_code}\" \"$URL/api/health\" 2>/dev/null || echo \"000\")\nif [ \"$HTTP_STATUS\" = \"200\" ]; then\n  echo \"PASS: Health endpoint returns 200\"\nelse\n  echo \"FAIL: Health endpoint returns $HTTP_STATUS\"\nfi\n\n# 4. Min instances check\nMIN_INSTANCES=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(spec.template.metadata.annotations['autoscaling.knative.dev/minScale'])\")\necho \"INFO: Min instances = ${MIN_INSTANCES:-0}\"\n\n# 5. Environment variables set (names only, not values)\necho \"INFO: Checking required env vars...\"\nENVS=$(gcloud run services describe \"$SERVICE_NAME\" \\\n  --project=\"$PROJECT_ID\" --region=\"$REGION\" \\\n  --format=\"value(spec.template.spec.containers[0].env.name)\" 2>/dev/null)\nfor REQUIRED in \"NODE_ENV\" \"DATABASE_URL\"; do\n  if echo \"$ENVS\" | grep -q \"$REQUIRED\"; then\n    echo \"PASS: $REQUIRED is set\"\n  else\n    echo \"WARN: $REQUIRED is NOT set\"\n  fi\ndone\n\nCloud SQL Validation\n#!/bin/bash\n# qa-tests/infra/cloud-sql-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\nINSTANCE=\"${1:-my-instance}\"\n\necho \"=== Cloud SQL Validation: $INSTANCE ===\"\n\n# 1. 
Instance running\nSTATE=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(state)\" 2>/dev/null)\nif [ \"$STATE\" != \"RUNNABLE\" ]; then\n  echo \"FAIL: Instance state is $STATE (expected RUNNABLE)\"\n  exit 1\nfi\necho \"PASS: Instance is running\"\n\n# 2. Backup enabled\nBACKUP=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(settings.backupConfiguration.enabled)\")\nif [ \"$BACKUP\" = \"True\" ]; then\n  echo \"PASS: Automated backups enabled\"\nelse\n  echo \"FAIL: Automated backups are DISABLED\"\nfi\n\n# 3. Storage usage\nSTORAGE_USED=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(currentDiskSize)\")\nSTORAGE_MAX=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(settings.dataDiskSizeGb)\")\necho \"INFO: Storage used = ${STORAGE_USED:-unknown}, max = ${STORAGE_MAX:-unknown}GB\"\n\n# 4. SSL required\nSSL=$(gcloud sql instances describe \"$INSTANCE\" \\\n  --project=\"$PROJECT_ID\" \\\n  --format=\"value(settings.ipConfiguration.requireSsl)\")\nif [ \"$SSL\" = \"True\" ]; then\n  echo \"PASS: SSL connections required\"\nelse\n  echo \"WARN: SSL connections NOT required\"\nfi\n\nFirestore Security Rules Validation\n#!/bin/bash\n# qa-tests/infra/firestore-rules-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\n\necho \"=== Firestore Security Rules Validation ===\"\n\n# 1. Check rules file exists locally\nif [ -f \"firestore.rules\" ]; then\n  echo \"PASS: firestore.rules file found\"\n\n  # 2. 
Check for open rules (security risk)\n  if grep -q \"allow read, write: if true\" firestore.rules; then\n    echo \"FAIL: CRITICAL — open read/write rules detected (allow if true)\"\n  elif grep \"allow read, write\" firestore.rules | grep -qv \"if request.auth\"; then\n    echo \"WARN: Some rules may not check authentication\"\n  else\n    echo \"PASS: Rules appear to check authentication\"\n  fi\n\n  # 3. Deploy rules to emulator for testing (if available)\n  if command -v firebase &>/dev/null; then\n    echo \"INFO: Running Firestore rules emulator tests...\"\n    firebase emulators:exec --only firestore \"npm run test:firestore-rules\" 2>/dev/null || echo \"WARN: Emulator test failed or not configured\"\n  fi\nelse\n  echo \"WARN: No firestore.rules file found locally\"\nfi\n\nSecret Manager Validation\n#!/bin/bash\n# qa-tests/infra/secret-manager-validation.sh\nset -euo pipefail\n\nPROJECT_ID=\"${GCP_PROJECT_ID}\"\n\necho \"=== Secret Manager Validation ===\"\n\nREQUIRED_SECRETS=(\"DATABASE_URL\" \"FIREBASE_PRIVATE_KEY\" \"OPENROUTER_API_KEY\")\n\nfor SECRET in \"${REQUIRED_SECRETS[@]}\"; do\n  EXISTS=$(gcloud secrets describe \"$SECRET\" \\\n    --project=\"$PROJECT_ID\" \\\n    --format=\"value(name)\" 2>/dev/null || echo \"\")\n  if [ -n \"$EXISTS\" ]; then\n    # Check that at least one version is enabled\n    ENABLED=$(gcloud secrets versions list \"$SECRET\" \\\n      --project=\"$PROJECT_ID\" \\\n      --filter=\"state=ENABLED\" \\\n      --format=\"value(name)\" --limit=1 2>/dev/null || echo \"\")\n    if [ -n \"$ENABLED\" ]; then\n      echo \"PASS: Secret $SECRET exists with enabled version\"\n    else\n      echo \"FAIL: Secret $SECRET exists but has no enabled versions\"\n    fi\n  else\n    echo \"FAIL: Secret $SECRET not found in Secret Manager\"\n  fi\ndone\n\nPart 7 — Database Integrity Validation (Firestore + Cloud SQL)\nFirestore Integrity\n// qa-tests/db/firestore-integrity.validation.test.ts\nimport { initializeApp, cert } from 
\"firebase-admin/app\";\nimport { getFirestore } from \"firebase-admin/firestore\";\n\ndescribe(\"Firestore Integrity\", () => {\n  const db = getFirestore();\n\n  it(\"required collections exist\", async () => {\n    const collections = await db.listCollections();\n    const names = collections.map((c) => c.id);\n    expect(names).toContain(\"entities\");\n    expect(names).toContain(\"users\");\n  });\n\n  it(\"no orphan subcollections\", async () => {\n    // Check that subcollections have valid parent documents\n    const entities = await db.collection(\"entities\").limit(10).get();\n    for (const doc of entities.docs) {\n      const subcols = await doc.ref.listCollections();\n      for (const subcol of subcols) {\n        const parentExists = (await doc.ref.get()).exists;\n        expect(parentExists).toBe(true);\n      }\n    }\n  });\n\n  it(\"required indexes are deployed\", async () => {\n    // Check firestore.indexes.json matches deployed indexes\n    // This is verified by attempting queries that require composite indexes\n  });\n});\n\nCloud SQL Integrity (via Prisma)\n// qa-tests/db/cloud-sql-integrity.validation.test.ts\ndescribe(\"Cloud SQL Integrity\", () => {\n  it(\"all migrations are applied\", async () => {\n    // Check Prisma migration status\n    // execSync(\"npx prisma migrate status\") should show no pending migrations\n  });\n\n  it(\"no orphan records\", async () => {\n    // Check foreign key relationships\n  });\n\n  it(\"indexes exist for common queries\", async () => {\n    // Verify explain plans for critical queries\n  });\n});\n\nPart 8 — Go/No-Go Report\n\nAfter executing all validations, generate a comprehensive report:\n\n{\n  \"report\": {\n    \"project\": \"project-name\",\n    \"version\": \"x.y.z\",\n    \"date\": \"ISO-8601\",\n    \"validator\": \"qa-gate-gcp\",\n    \"stack\": {\n      \"compute\": \"cloud-run\",\n      \"database\": \"firestore\",\n      \"auth\": \"firebase-auth\"\n    },\n    \"verdict\": \"GO | 
NO-GO | CONDITIONAL\",\n    \"summary\": {\n      \"total_checks\": 52,\n      \"passed\": 48,\n      \"failed\": 3,\n      \"skipped\": 1,\n      \"pass_rate\": \"92.3%\"\n    },\n    \"sections\": {\n      \"api_endpoints\": { \"status\": \"PASS\", \"checks_run\": 12, \"checks_passed\": 12 },\n      \"ui_pages\": { \"status\": \"PASS\", \"checks_run\": 8, \"checks_passed\": 8 },\n      \"toast_notifications\": {\n        \"status\": \"FAIL\",\n        \"checks_run\": 6,\n        \"checks_passed\": 4,\n        \"failures\": [\n          {\n            \"test\": \"no_duplicate_toasts\",\n            \"page\": \"/entities/new\",\n            \"severity\": \"medium\",\n            \"recommendation\": \"Add debounce to form submission\"\n          }\n        ]\n      },\n      \"auth_flows\": { \"status\": \"PASS\", \"checks_run\": 5, \"checks_passed\": 5 },\n      \"llm_quality\": {\n        \"status\": \"CONDITIONAL\",\n        \"rule_based\": { \"passed\": 8, \"failed\": 0 },\n        \"llm_judge\": { \"average_score\": 3.8, \"recommendation\": \"review\" }\n      },\n      \"database_integrity\": {\n        \"firestore\": { \"status\": \"PASS\", \"security_rules_enforced\": true },\n        \"cloud_sql\": { \"status\": \"PASS\", \"migrations_applied\": true }\n      },\n      \"gcp_infrastructure\": {\n        \"cloud_run\": { \"status\": \"PASS\", \"service_ready\": true, \"latest_revision_serving\": true },\n        \"cloud_sql\": { \"status\": \"PASS\", \"instance_running\": true, \"backup_enabled\": true },\n        \"secret_manager\": { \"status\": \"PASS\", \"all_secrets_present\": true }\n      }\n    },\n    \"blockers\": [],\n    \"warnings\": [\n      { \"id\": \"WARN-001\", \"severity\": \"medium\", \"description\": \"Duplicate toasts on rapid clicks\" },\n      { \"id\": \"WARN-002\", \"severity\": \"low\", \"description\": \"LLM tone slightly formal\" }\n    ],\n    \"go_conditions\": {\n      \"all_api_tests_pass\": true,\n      
\"all_auth_tests_pass\": true,\n      \"no_high_severity_blockers\": true,\n      \"llm_quality_above_threshold\": true,\n      \"gcp_services_healthy\": true,\n      \"security_rules_enforced\": true,\n      \"secrets_in_secret_manager\": true\n    }\n  }\n}\n\nVerdict Logic\nGO: All checks pass, no blockers, GCP services healthy, security rules enforced.\nNO-GO: Any high-severity blocker OR auth failure OR data integrity failure OR GCP service down OR security rules open.\nCONDITIONAL: Medium-severity issues that can be accepted with stakeholder approval.\n\nSave to qa-reports/go-no-go-report.json and qa-reports/go-no-go-report.md.\n\nPart 9 — Execution Pipeline\n1.  Generate test plan              → qa-reports/test-plan.json\n2.  Run existing test suite         → npx vitest run / npx playwright test\n3.  Generate validation tests       → qa-tests/**/*\n4.  Run API validations             → qa-tests/api/\n5.  Run UI/toast validations        → qa-tests/ui/\n6.  Run auth flow validations       → qa-tests/auth/\n7.  Run LLM quality validations     → qa-tests/llm/\n8.  Run GCP infra validations       → qa-tests/infra/ (bash scripts via gcloud CLI)\n9.  Run database integrity checks   → qa-tests/db/\n10. Aggregate results               → qa-reports/go-no-go-report.json\n11. 
Generate human report           → qa-reports/go-no-go-report.md\n\nCommands\n# Step 2: Existing tests\nnpx vitest run --reporter=json --outputFile=qa-reports/vitest-results.json 2>/dev/null || true\nnpx playwright test --reporter=json --output=qa-reports/playwright-results.json 2>/dev/null || true\n\n# Step 3-7: Validation tests\nnpx vitest run --config qa-tests/vitest.config.ts --reporter=json --outputFile=qa-reports/validation-results.json\nnpx playwright test --config qa-tests/playwright.config.ts --reporter=json --output=qa-reports/playwright-validation-results.json\n\n# Step 8: GCP infra (bash scripts)\nbash qa-tests/infra/cloud-run-validation.sh \"$SERVICE_NAME\" | tee qa-reports/cloud-run-validation.log\nbash qa-tests/infra/cloud-sql-validation.sh \"$INSTANCE_NAME\" | tee qa-reports/cloud-sql-validation.log\nbash qa-tests/infra/firestore-rules-validation.sh | tee qa-reports/firestore-rules-validation.log\nbash qa-tests/infra/secret-manager-validation.sh | tee qa-reports/secret-manager-validation.log\n\nBest Practices (DO)\nAlways run the existing test suite FIRST before adding validation tests\nUse separate directories (qa-tests/, qa-reports/) to avoid polluting the app\nDetect and adapt to the project's test framework (Vitest/Jest, Playwright/Cypress)\nRun rule-based LLM checks before LLM-as-judge (cheaper, faster)\nInclude severity levels in all failures (high/medium/low)\nGenerate both JSON and Markdown reports\nValidate GCP infra using gcloud CLI (not HTTP calls to management APIs)\nCheck Firestore security rules for open access patterns\nVerify Secret Manager has all required secrets with enabled versions\nCheck Cloud SQL backup configuration\nValidate Cloud Run service health via the /api/health endpoint\nAnti-Patterns (AVOID)\nNEVER skip the test plan generation step\nNEVER mix validation tests with app tests (separate config files)\nNEVER hardcode auth tokens in test files\nNEVER run LLM-as-judge without rule-based checks first\nNEVER mark a test as 
\"skipped\" without documenting why\nNEVER auto-approve a NO-GO verdict\nNEVER test against production data\nNEVER ignore toast validation\nNEVER use gcloud commands that modify resources during validation (read-only!)\nNEVER expose secret values in logs or reports — only check existence\nSafety Rules\nNEVER read or modify .env, .env.local, or any credential file directly\nAll env var references are in generated test/script code via process.env.* or os.environ.get()\nNEVER auto-deploy after a CONDITIONAL or NO-GO verdict\nNEVER delete data from production databases\nNEVER expose API keys or secret values in test reports — redact before writing\nIf OPENROUTER_API_KEY is not set, skip LLM-as-judge and mark as \"review\"\nAll gcloud commands are READ-ONLY (describe, list) — NEVER run create, update, delete during validation\nNEVER read secret values from Secret Manager — only check existence and enabled status"
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/guifav/qa-gate-gcp",
    "publisherUrl": "https://clawhub.ai/guifav/qa-gate-gcp",
    "owner": "guifav",
    "version": "0.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/qa-gate-gcp",
    "downloadUrl": "https://openagent3.xyz/downloads/qa-gate-gcp",
    "agentUrl": "https://openagent3.xyz/skills/qa-gate-gcp/agent",
    "manifestUrl": "https://openagent3.xyz/skills/qa-gate-gcp/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/qa-gate-gcp/agent.md"
  }
}