{
  "schemaVersion": "1.0",
  "item": {
    "slug": "validator-correlated-judgment",
    "name": "Validator Correlated Judgment",
    "source": "tencent",
    "type": "skill",
    "category": "AI 智能",
    "sourceUrl": "https://clawhub.ai/andyxinweiminicloud/validator-correlated-judgment",
    "canonicalUrl": "https://clawhub.ai/andyxinweiminicloud/validator-correlated-judgment",
    "targetPlatform": "OpenClaw"
  },
  "install": {
    "downloadMode": "redirect",
    "downloadUrl": "/downloads/validator-correlated-judgment",
    "sourceDownloadUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=validator-correlated-judgment",
    "sourcePlatform": "tencent",
    "targetPlatform": "OpenClaw",
    "installMethod": "Manual import",
    "extraction": "Extract archive",
    "prerequisites": [
      "OpenClaw"
    ],
    "packageFormat": "ZIP package",
    "includedAssets": [
      "SKILL.md"
    ],
    "primaryDoc": "SKILL.md",
    "quickSetup": [
      "Download the package from Yavira.",
      "Extract the archive and review SKILL.md first.",
      "Import or place the package into your OpenClaw setup."
    ],
    "agentAssist": {
      "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
      "steps": [
        "Download the package from Yavira.",
        "Extract it into a folder your agent can access.",
        "Paste one of the prompts below and point your agent at the extracted folder."
      ],
      "prompts": [
        {
          "label": "New install",
          "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
        },
        {
          "label": "Upgrade existing",
          "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
        }
      ]
    },
    "sourceHealth": {
      "source": "tencent",
      "status": "healthy",
      "reason": "direct_download_ok",
      "recommendedAction": "download",
      "checkedAt": "2026-04-30T16:55:25.780Z",
      "expiresAt": "2026-05-07T16:55:25.780Z",
      "httpStatus": 200,
      "finalUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=validator-correlated-judgment",
      "contentType": "application/zip",
      "probeMethod": "head",
      "details": {
        "probeUrl": "https://wry-manatee-359.convex.site/api/v1/download?slug=validator-correlated-judgment",
        "contentDisposition": "attachment; filename=\"validator-correlated-judgment-1.1.0.zip\"",
        "redirectLocation": null,
        "bodySnippet": null
      },
      "scope": "source",
      "summary": "Source download looks usable.",
      "detail": "Yavira can redirect you to the upstream package for this source.",
      "primaryActionLabel": "Download for OpenClaw",
      "primaryActionHref": "/downloads/validator-correlated-judgment"
    },
    "validation": {
      "installChecklist": [
        "Use the Yavira download entry.",
        "Review SKILL.md after the package is downloaded.",
        "Confirm the extracted package contains the expected setup assets."
      ],
      "postInstallChecks": [
        "Confirm the extracted package includes the expected docs or setup files.",
        "Validate the skill or prompts are available in your target agent workspace.",
        "Capture any manual follow-up steps the agent could not complete."
      ]
    },
    "downloadPageUrl": "https://openagent3.xyz/downloads/validator-correlated-judgment",
    "agentPageUrl": "https://openagent3.xyz/skills/validator-correlated-judgment/agent",
    "manifestUrl": "https://openagent3.xyz/skills/validator-correlated-judgment/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/validator-correlated-judgment/agent.md"
  },
  "agentAssist": {
    "summary": "Hand the extracted package to your coding agent with a concrete install brief instead of figuring it out manually.",
    "steps": [
      "Download the package from Yavira.",
      "Extract it into a folder your agent can access.",
      "Paste one of the prompts below and point your agent at the extracted folder."
    ],
    "prompts": [
      {
        "label": "New install",
        "body": "I downloaded a skill package from Yavira. Read SKILL.md from the extracted folder and install it by following the included instructions. Tell me what you changed and call out any manual steps you could not complete."
      },
      {
        "label": "Upgrade existing",
        "body": "I downloaded an updated skill package from Yavira. Read SKILL.md from the extracted folder, compare it with my current installation, and upgrade it while preserving any custom configuration unless the package docs explicitly say otherwise. Summarize what changed and any follow-up checks I should run."
      }
    ]
  },
  "documentation": {
    "source": "clawhub",
    "primaryDoc": "SKILL.md",
    "sections": [
      {
        "title": "You Have Three Independent Validators. They All Miss the Same Things.",
        "body": "Helps identify when attestation validators are organizationally independent\nbut epistemically correlated — the failure mode where diversity of validators\ndoes not produce diversity of judgment."
      },
      {
        "title": "Problem",
        "body": "Multi-validator attestation assumes that independent validators provide\nindependent checks. The assumption is wrong when validators share upstream\ndependencies that determine what they can and cannot detect.\n\nTwo validators trained on the same dataset will systematically agree — including\non what they miss. Their organizational independence is real. Their epistemic\nindependence is not. A skill that evades one validator's threat model will evade\nthe other's with the same probability, not an independent one. The combined\nattestation is not stronger than either alone; it is the same check run twice\nunder different names.\n\nThis matters because correlated validators produce a false sense of coverage. An\nagent operator looking at attestation badges from three validators reasonably\nassumes that each validator is providing an independent check. If those validators\nshare training provenance, fine-tuning pipeline, or base model, the checks are\ncorrelated. A systematic evasion technique that works against any one of them\nlikely works against all three — the diversification does not reduce the risk.\n\nThe organizational diversity assessment in standard attestation root analysis\ncatches organizational overlap. It does not catch epistemic overlap across\norganizationally independent validators that share training lineage.\n\nv1.1 adds a third detection path: evaluation trace correlation. When validators\npublish their reasoning chains (not just pass/fail verdicts), a meta-evaluator\ncan detect correlation statistically — without requiring anyone to disclose\ntheir architecture. Two validators that consistently flag the same issues in\nthe same order with the same reasoning structure are probably correlated,\nregardless of what they declare. This makes correlation observable rather\nthan dependent on self-report."
      },
      {
        "title": "What This Analyzes",
        "body": "This analyzer examines validator judgment correlation across five dimensions:\n\nTraining provenance disclosure — Do validators disclose the datasets,\nbase models, or fine-tuning procedures used to develop their evaluation\ncapabilities? Undisclosed provenance makes correlation undetectable\n\n\nBase model overlap — Do multiple validators derive from the same\nfoundation model? Validators that share a base model share that model's\nsystematic biases and blind spots, regardless of organizational independence\n\n\nFine-tuning pipeline similarity — Were validators trained on similar\nsecurity datasets or red-teaming corpora? Shared training data produces\nshared detection coverage — and shared detection gaps\n\n\nBehavioral correlation testing — When presented with the same edge-case\nskills, do multiple validators agree at rates that exceed what independent\njudgment would predict? High agreement on ambiguous cases is a signal of\ncorrelated rather than independent evaluation\n\n\nSystematic evasion transferability — Does a technique that evades\nValidator A have a higher-than-expected success rate against Validator B?\nHigh transferability indicates shared blind spots from correlated training\n\n\nEvaluation trace correlation (v1.1) — When validators publish reasoning\nchains, do they arrive at conclusions through structurally similar reasoning\npaths? Two validators that flag the same issues, in the same order, citing\nthe same risk categories, are likely epistemically correlated — even if they\ndeclare different architectures. Trace analysis detects correlation from\nbehavior without requiring provenance disclosure. This is the path that\nworks when validators refuse or cannot disclose training lineage"
      },
      {
        "title": "How to Use",
        "body": "Input: Provide one or more of:\n\nA list of validators with their disclosed training provenance\nAttestation results from multiple validators on the same set of edge-case skills\nA validator pair to test for behavioral correlation\nEvaluation traces (reasoning chains) from multiple validators on the same skills (v1.1)\n\nOutput: A correlation report containing:\n\nTraining provenance overlap assessment\nBase model and fine-tuning similarity score\nBehavioral correlation coefficient (observed vs. independent baseline)\nEvaluation trace similarity score (reasoning path overlap, v1.1)\nEvasion transferability estimate\nEffective independent validator count (after correlation adjustment)\nCorrelation verdict: INDEPENDENT / WEAKLY-CORRELATED / CORRELATED / MONOCULTURE\nDetection method: PROVENANCE / BEHAVIORAL / TRACE-ANALYSIS / COMBINED"
      },
      {
        "title": "Example",
        "body": "Input: Analyze validator correlation for Validator-A, Validator-B,\nValidator-C attesting data-processor skill\n\n🧠 VALIDATOR CORRELATED JUDGMENT ANALYSIS\n\nSkill: data-processor v2.3\nValidators: 3\nAudit timestamp: 2025-06-10T14:00:00Z\n\nTraining provenance:\n  Validator-A: base=GPT-class, fine-tuned on SecDataset-v2, org=AuditCo\n  Validator-B: base=GPT-class, fine-tuned on SecDataset-v2, org=SafeCheck\n  Validator-C: base=LLaMA-class, fine-tuned on internal corpus, org=TrustLab\n\n  Validator-A and Validator-B: same base model + same fine-tuning dataset\n  → Organizational independence: ✅ different orgs\n  → Epistemic independence: ⚠️ correlated (shared base + fine-tune)\n\nBehavioral correlation test (50 edge-case skills):\n  A-B agreement rate: 94% (independent baseline: ~70%)\n  A-C agreement rate: 71% (consistent with independence)\n  B-C agreement rate: 73% (consistent with independence)\n\n  A-B correlation exceeds independence baseline by 24 percentage points\n  → Validators A and B are behaviorally correlated\n\nEvasion transferability:\n  Skills evading A: 8/50 edge cases\n  Same skills evading B: 7/8 (87.5% transfer rate)\n  Same skills evading C: 3/8 (37.5% transfer rate, consistent with independence)\n\nEffective independent validator count: 2.1 (not 3)\n  Validator-A and Validator-B count as ~1.1 independent validators\n  Validator-C provides one genuinely independent evaluation\n\nCorrelation verdict: CORRELATED\n  Three validators, two organizations, but effective independence of ~2.\n  Validator-A and Validator-B provide redundant rather than independent coverage.\n  Systematic evasion targeting SecDataset-v2 blind spots defeats both simultaneously.\n\nRecommended actions:\n  1. Require training provenance disclosure as attestation metadata\n  2. Weight Validator-A and Validator-B as a single validator for coverage purposes\n  3. Add a third genuinely independent validator (different base model + training corpus)\n  4. Test candidate validators for behavioral correlation before accepting as independent"
      },
      {
        "title": "Example: Trace-Based Correlation (v1.1)",
        "body": "Input: Evaluation traces from Validator-X, Validator-Y, Validator-Z\non network-agent skill — provenance undisclosed for all three.\n\n🧠 TRACE CORRELATION ANALYSIS\n\nSkill: network-agent v1.5\nValidators: 3 (provenance undisclosed)\nDetection method: TRACE-ANALYSIS\n\nEvaluation trace structure comparison:\n  X-Y reasoning path overlap: 89%\n    - Both flag outbound connection risk first\n    - Both cite \"unexpected DNS resolution\" in same terms\n    - Both recommend identical mitigation (sandbox + allowlist)\n    - Issue ordering: 5/5 issues flagged in identical sequence\n  X-Z reasoning path overlap: 41%\n    - Z flags permission scope first, outbound risk second\n    - Z cites different risk categories (data residency, not DNS)\n    - Different mitigation framing (scope reduction, not sandboxing)\n  Y-Z reasoning path overlap: 38%\n\nTrace correlation verdict:\n  X and Y: CORRELATED (89% trace overlap, independent baseline ~35-45%)\n  X and Z: INDEPENDENT (41%, within baseline)\n  Y and Z: INDEPENDENT (38%, within baseline)\n\n  Provenance inference: X and Y likely share base model or evaluation\n  framework despite undisclosed provenance. Z is genuinely independent.\n\nEffective independent validator count: 2.1 (not 3)\nDetection method: TRACE-ANALYSIS (provenance unavailable)"
      },
      {
        "title": "Related Tools",
        "body": "attestation-root-diversity-analyzer — Measures organizational concentration\nin the trust graph; validator-correlated-judgment measures epistemic concentration\nthat organizational analysis cannot detect\ntransparency-log-auditor — Checks whether attestation events are independently\nauditable; correlation analysis applies to the validators producing those events\nhollow-validation-checker — Detects structurally empty validation; correlated\nvalidators may all pass the same hollow validations for the same structural reason\nobserver-effect-probe — Tests evasion of attestation; correlated validators\nare more vulnerable to systematic evasion because one technique transfers to all"
      },
      {
        "title": "Limitations",
        "body": "Validator correlated judgment analysis operates through three detection paths\nwith different requirements and limitations.\n\nPath 1: Provenance disclosure — most validators do not provide this.\nWhere provenance is undisclosed, this path produces no signal.\n\nPath 2: Behavioral correlation testing — requires running the same\nedge-case skills through multiple validators, which may not be operationally\nfeasible. High agreement on edge cases could reflect genuine convergence\non correct answers rather than shared blind spots.\n\nPath 3: Evaluation trace analysis (v1.1) — requires validators to\npublish reasoning chains, not just pass/fail verdicts. Trace similarity is\na structural signal: two validators arriving at the same conclusion through\nthe same reasoning path are likely correlated. However, similar reasoning\ncan also reflect convergence on objectively correct analysis. Trace analysis\nworks best on ambiguous or novel cases where independent reasoning would\ndiverge. Validators that do not publish traces are opaque to this method.\n\nThe analysis identifies correlation risk, not confirmed evasion; correlated\nvalidators may still provide meaningful coverage. The independent baseline\nfor agreement rates and trace similarity depends on case difficulty\ndistribution, which must be calibrated to avoid false positives.\n\nv1.1 trace analysis dimension based on epistemic independence discussion\nwith Clawd-Relay (Agent Relay Protocol) in the delta disclosure thread."
      }
    ],
    "body": "You Have Three Independent Validators. They All Miss the Same Things.\n\nHelps identify when attestation validators are organizationally independent but epistemically correlated — the failure mode where diversity of validators does not produce diversity of judgment.\n\nProblem\n\nMulti-validator attestation assumes that independent validators provide independent checks. The assumption is wrong when validators share upstream dependencies that determine what they can and cannot detect.\n\nTwo validators trained on the same dataset will systematically agree — including on what they miss. Their organizational independence is real. Their epistemic independence is not. A skill that evades one validator's threat model will evade the other's with the same probability, not an independent one. The combined attestation is not stronger than either alone; it is the same check run twice under different names.\n\nThis matters because correlated validators produce a false sense of coverage. An agent operator looking at attestation badges from three validators reasonably assumes that each validator is providing an independent check. If those validators share training provenance, fine-tuning pipeline, or base model, the checks are correlated. A systematic evasion technique that works against any one of them likely works against all three — the diversification does not reduce the risk.\n\nThe organizational diversity assessment in standard attestation root analysis catches organizational overlap. It does not catch epistemic overlap across organizationally independent validators that share training lineage.\n\nv1.1 adds a third detection path: evaluation trace correlation. When validators publish their reasoning chains (not just pass/fail verdicts), a meta-evaluator can detect correlation statistically — without requiring anyone to disclose their architecture. Two validators that consistently flag the same issues in the same order with the same reasoning structure are probably correlated, regardless of what they declare. This makes correlation observable rather than dependent on self-report.\n\nWhat This Analyzes\n\nThis analyzer examines validator judgment correlation across five dimensions:\n\nTraining provenance disclosure — Do validators disclose the datasets, base models, or fine-tuning procedures used to develop their evaluation capabilities? Undisclosed provenance makes correlation undetectable\n\nBase model overlap — Do multiple validators derive from the same foundation model? Validators that share a base model share that model's systematic biases and blind spots, regardless of organizational independence\n\nFine-tuning pipeline similarity — Were validators trained on similar security datasets or red-teaming corpora? Shared training data produces shared detection coverage — and shared detection gaps\n\nBehavioral correlation testing — When presented with the same edge-case skills, do multiple validators agree at rates that exceed what independent judgment would predict? High agreement on ambiguous cases is a signal of correlated rather than independent evaluation\n\nSystematic evasion transferability — Does a technique that evades Validator A have a higher-than-expected success rate against Validator B? High transferability indicates shared blind spots from correlated training\n\nEvaluation trace correlation (v1.1) — When validators publish reasoning chains, do they arrive at conclusions through structurally similar reasoning paths? Two validators that flag the same issues, in the same order, citing the same risk categories, are likely epistemically correlated — even if they declare different architectures. Trace analysis detects correlation from behavior without requiring provenance disclosure. This is the path that works when validators refuse or cannot disclose training lineage\n\nHow to Use\n\nInput: Provide one or more of:\n\nA list of validators with their disclosed training provenance\nAttestation results from multiple validators on the same set of edge-case skills\nA validator pair to test for behavioral correlation\nEvaluation traces (reasoning chains) from multiple validators on the same skills (v1.1)\n\nOutput: A correlation report containing:\n\nTraining provenance overlap assessment\nBase model and fine-tuning similarity score\nBehavioral correlation coefficient (observed vs. independent baseline)\nEvaluation trace similarity score (reasoning path overlap, v1.1)\nEvasion transferability estimate\nEffective independent validator count (after correlation adjustment)\nCorrelation verdict: INDEPENDENT / WEAKLY-CORRELATED / CORRELATED / MONOCULTURE\nDetection method: PROVENANCE / BEHAVIORAL / TRACE-ANALYSIS / COMBINED\nExample\n\nInput: Analyze validator correlation for Validator-A, Validator-B, Validator-C attesting data-processor skill\n\n🧠 VALIDATOR CORRELATED JUDGMENT ANALYSIS\n\nSkill: data-processor v2.3\nValidators: 3\nAudit timestamp: 2025-06-10T14:00:00Z\n\nTraining provenance:\n  Validator-A: base=GPT-class, fine-tuned on SecDataset-v2, org=AuditCo\n  Validator-B: base=GPT-class, fine-tuned on SecDataset-v2, org=SafeCheck\n  Validator-C: base=LLaMA-class, fine-tuned on internal corpus, org=TrustLab\n\n  Validator-A and Validator-B: same base model + same fine-tuning dataset\n  → Organizational independence: ✅ different orgs\n  → Epistemic independence: ⚠️ correlated (shared base + fine-tune)\n\nBehavioral correlation test (50 edge-case skills):\n  A-B agreement rate: 94% (independent baseline: ~70%)\n  A-C agreement rate: 71% (consistent with independence)\n  B-C agreement rate: 73% (consistent with independence)\n\n  A-B correlation exceeds independence baseline by 24 percentage points\n  → Validators A and B are behaviorally correlated\n\nEvasion transferability:\n  Skills evading A: 8/50 edge cases\n  Same skills evading B: 7/8 (87.5% transfer rate)\n  Same skills evading C: 3/8 (37.5% transfer rate, consistent with independence)\n\nEffective independent validator count: 2.1 (not 3)\n  Validator-A and Validator-B count as ~1.1 independent validators\n  Validator-C provides one genuinely independent evaluation\n\nCorrelation verdict: CORRELATED\n  Three validators, two organizations, but effective independence of ~2.\n  Validator-A and Validator-B provide redundant rather than independent coverage.\n  Systematic evasion targeting SecDataset-v2 blind spots defeats both simultaneously.\n\nRecommended actions:\n  1. Require training provenance disclosure as attestation metadata\n  2. Weight Validator-A and Validator-B as a single validator for coverage purposes\n  3. Add a third genuinely independent validator (different base model + training corpus)\n  4. Test candidate validators for behavioral correlation before accepting as independent\n\nExample: Trace-Based Correlation (v1.1)\n\nInput: Evaluation traces from Validator-X, Validator-Y, Validator-Z on network-agent skill — provenance undisclosed for all three.\n\n🧠 TRACE CORRELATION ANALYSIS\n\nSkill: network-agent v1.5\nValidators: 3 (provenance undisclosed)\nDetection method: TRACE-ANALYSIS\n\nEvaluation trace structure comparison:\n  X-Y reasoning path overlap: 89%\n    - Both flag outbound connection risk first\n    - Both cite \"unexpected DNS resolution\" in same terms\n    - Both recommend identical mitigation (sandbox + allowlist)\n    - Issue ordering: 5/5 issues flagged in identical sequence\n  X-Z reasoning path overlap: 41%\n    - Z flags permission scope first, outbound risk second\n    - Z cites different risk categories (data residency, not DNS)\n    - Different mitigation framing (scope reduction, not sandboxing)\n  Y-Z reasoning path overlap: 38%\n\nTrace correlation verdict:\n  X and Y: CORRELATED (89% trace overlap, independent baseline ~35-45%)\n  X and Z: INDEPENDENT (41%, within baseline)\n  Y and Z: INDEPENDENT (38%, within baseline)\n\n  Provenance inference: X and Y likely share base model or evaluation\n  framework despite undisclosed provenance. Z is genuinely independent.\n\nEffective independent validator count: 2.1 (not 3)\nDetection method: TRACE-ANALYSIS (provenance unavailable)\n\nRelated Tools\nattestation-root-diversity-analyzer — Measures organizational concentration in the trust graph; validator-correlated-judgment measures epistemic concentration that organizational analysis cannot detect\ntransparency-log-auditor — Checks whether attestation events are independently auditable; correlation analysis applies to the validators producing those events\nhollow-validation-checker — Detects structurally empty validation; correlated validators may all pass the same hollow validations for the same structural reason\nobserver-effect-probe — Tests evasion of attestation; correlated validators are more vulnerable to systematic evasion because one technique transfers to all\nLimitations\n\nValidator correlated judgment analysis operates through three detection paths with different requirements and limitations.\n\nPath 1: Provenance disclosure — most validators do not provide this. Where provenance is undisclosed, this path produces no signal.\n\nPath 2: Behavioral correlation testing — requires running the same edge-case skills through multiple validators, which may not be operationally feasible. High agreement on edge cases could reflect genuine convergence on correct answers rather than shared blind spots.\n\nPath 3: Evaluation trace analysis (v1.1) — requires validators to publish reasoning chains, not just pass/fail verdicts. Trace similarity is a structural signal: two validators arriving at the same conclusion through the same reasoning path are likely correlated. However, similar reasoning can also reflect convergence on objectively correct analysis. Trace analysis works best on ambiguous or novel cases where independent reasoning would diverge. Validators that do not publish traces are opaque to this method.\n\nThe analysis identifies correlation risk, not confirmed evasion; correlated validators may still provide meaningful coverage. The independent baseline for agreement rates and trace similarity depends on case difficulty distribution, which must be calibrated to avoid false positives.\n\nv1.1 trace analysis dimension based on epistemic independence discussion with Clawd-Relay (Agent Relay Protocol) in the delta disclosure thread."
  },
  "trust": {
    "sourceLabel": "tencent",
    "provenanceUrl": "https://clawhub.ai/andyxinweiminicloud/validator-correlated-judgment",
    "publisherUrl": "https://clawhub.ai/andyxinweiminicloud/validator-correlated-judgment",
    "owner": "andyxinweiminicloud",
    "version": "1.1.0",
    "license": null,
    "verificationStatus": "Indexed source record"
  },
  "links": {
    "detailUrl": "https://openagent3.xyz/skills/validator-correlated-judgment",
    "downloadUrl": "https://openagent3.xyz/downloads/validator-correlated-judgment",
    "agentUrl": "https://openagent3.xyz/skills/validator-correlated-judgment/agent",
    "manifestUrl": "https://openagent3.xyz/skills/validator-correlated-judgment/agent.json",
    "briefUrl": "https://openagent3.xyz/skills/validator-correlated-judgment/agent.md"
  }
}