{
  "_meta": {
    "name": "TopAIThreats.com Knowledge Graph",
    "graph_version": "2.0",
    "schema_version": "2.0",
    "generated": "2026-04-17T02:38:07.538Z",
    "description": "Complete relationship graph of AI threat domains, threat patterns, incidents, and glossary terms. All edges are auto-generated at build time from content frontmatter.",
    "url": "https://topaithreats.com",
    "license": "CC BY 4.0",
    "stats": {
      "domains": 8,
      "sub_categories": 49,
      "incidents": 179,
      "glossary_terms": 182,
      "total_nodes": 418,
      "total_edges": 1829
    },
    "edge_types": [
      {
        "type": "has_pattern",
        "direction": "Domain → SubCategory",
        "description": "Domain contains this threat pattern"
      },
      {
        "type": "belongs_to",
        "direction": "SubCategory → Domain",
        "description": "Inverse of has_pattern"
      },
      {
        "type": "primary_pattern",
        "direction": "Incident → SubCategory",
        "description": "Primary threat classification"
      },
      {
        "type": "secondary_pattern",
        "direction": "Incident → SubCategory",
        "description": "Secondary threat classification"
      },
      {
        "type": "references_term",
        "direction": "Incident|SubCategory → Glossary",
        "description": "References a glossary term"
      },
      {
        "type": "explains",
        "direction": "Glossary → SubCategory",
        "description": "Glossary term explains this threat pattern"
      },
      {
        "type": "related_pattern",
        "direction": "SubCategory → SubCategory",
        "description": "Cross-domain relationship"
      },
      {
        "type": "related_term",
        "direction": "Glossary → Glossary",
        "description": "Semantic relationship between terms"
      }
    ],
    "warnings": [
      "Weak node: Adversarial Perturbation (glossary) has no pattern or incident connections",
      "Weak node: Adversarial Training (glossary) has no pattern or incident connections",
      "Weak node: Agent Framework (glossary) has no pattern or incident connections",
      "Weak node: AI Risk Management Framework (glossary) has no pattern or incident connections",
      "Weak node: Attack Surface (glossary) has no pattern or incident connections",
      "Weak node: C2PA (glossary) has no pattern or incident connections",
      "Weak node: Chain of Thought (glossary) has no pattern or incident connections",
      "Weak node: Context Window (glossary) has no pattern or incident connections",
      "Weak node: Defense in Depth (glossary) has no pattern or incident connections",
      "Weak node: Differential Privacy (glossary) has no pattern or incident connections",
      "Weak node: Diffusion Model (glossary) has no pattern or incident connections",
      "Weak node: Digital Watermarking (glossary) has no pattern or incident connections",
      "Weak node: Fine-Tuning (glossary) has no pattern or incident connections",
      "Weak node: Flash Crash (glossary) has no pattern or incident connections",
      "Weak node: Foundation Model (glossary) has no pattern or incident connections",
      "Weak node: Function Calling (glossary) has no pattern or incident connections",
      "Weak node: Generative Adversarial Network (glossary) has no pattern or incident connections",
      "Weak node: Indirect Prompt Injection (glossary) has no pattern or incident connections",
      "Weak node: Input Validation (glossary) has no pattern or incident connections",
      "Weak node: Instruction Hierarchy (glossary) has no pattern or incident connections",
      "Weak node: Instrumental Convergence (glossary) has no pattern or incident connections",
      "Weak node: Least Privilege (glossary) has no pattern or incident connections",
      "Weak node: Liar's Dividend (glossary) has no pattern or incident connections",
      "Weak node: MITRE ATLAS (glossary) has no pattern or incident connections",
      "Weak node: Model Context Protocol (glossary) has no pattern or incident connections",
      "Weak node: Output Sandboxing (glossary) has no pattern or incident connections",
      "Weak node: OWASP Top 10 for LLM Applications (glossary) has no pattern or incident connections",
      "Weak node: Red Teaming (glossary) has no pattern or incident connections",
      "Weak node: Self-Replication (glossary) has no pattern or incident connections",
      "Weak node: Specification Gaming (glossary) has no pattern or incident connections",
      "Weak node: Synthetic Identity (glossary) has no pattern or incident connections",
      "Weak node: Systemic Risk (glossary) has no pattern or incident connections",
      "Weak node: Transfer Learning (glossary) has no pattern or incident connections"
    ]
  },
  "nodes": [
    {
      "id": "domain:DOM-AGT",
      "type": "domain",
      "domain_code": "DOM-AGT",
      "title": "Agentic & Autonomous Threats",
      "slug": "agentic-autonomous",
      "definition": "Threats caused by AI systems that act independently, persist over time, or coordinate with other systems.",
      "url": "https://topaithreats.com/domains/agentic-autonomous/",
      "last_updated": "2026-03-20",
      "framework_mapping": {
        "mit": "Multi-agent risks",
        "eu_ai_act": "Systemic & autonomy risks (emerging)",
        "nist_ai_rmf": "Safety, controllability & agent oversight",
        "iso_42001": "Autonomous system risk management"
      },
      "degree": 14
    },
    {
      "id": "domain:DOM-CTL",
      "type": "domain",
      "domain_code": "DOM-CTL",
      "title": "Human–AI Control Threats",
      "slug": "human-ai-control",
      "definition": "Threats arising from how humans rely on, defer to, or lose control over AI systems.",
      "url": "https://topaithreats.com/domains/human-ai-control/",
      "last_updated": "2026-03-01",
      "framework_mapping": {
        "mit": "Human-Computer Interaction",
        "eu_ai_act": "Transparency & oversight requirements",
        "nist_ai_rmf": "Explainability & human oversight",
        "iso_42001": "Human oversight & interpretability controls"
      },
      "degree": 12
    },
    {
      "id": "domain:DOM-ECO",
      "type": "domain",
      "domain_code": "DOM-ECO",
      "title": "Economic & Labor Threats",
      "slug": "economic-labor",
      "definition": "Threats that distort markets, labor conditions, or the distribution of economic power.",
      "url": "https://topaithreats.com/domains/economic-labor/",
      "last_updated": "2026-03-01",
      "framework_mapping": {
        "mit": "Socioeconomic",
        "eu_ai_act": "Market fairness, systemic risk",
        "nist_ai_rmf": "Accountability & socioeconomic impact",
        "iso_42001": "Stakeholder impact management"
      },
      "degree": 10
    },
    {
      "id": "domain:DOM-INF",
      "type": "domain",
      "domain_code": "DOM-INF",
      "title": "Information Integrity Threats",
      "slug": "information-integrity",
      "definition": "Threats that undermine the reliability, authenticity, or shared understanding of information.",
      "url": "https://topaithreats.com/domains/information-integrity/",
      "last_updated": "2026-03-20",
      "framework_mapping": {
        "mit": "Misinformation",
        "eu_ai_act": "Manipulation, democratic harm",
        "nist_ai_rmf": "Validity, reliability & content provenance",
        "iso_42001": "Output quality & data integrity management"
      },
      "degree": 12
    },
    {
      "id": "domain:DOM-PRI",
      "type": "domain",
      "domain_code": "DOM-PRI",
      "title": "Privacy & Surveillance Threats",
      "slug": "privacy-surveillance",
      "definition": "Threats involving unauthorized inference, tracking, or monitoring of individuals or groups.",
      "url": "https://topaithreats.com/domains/privacy-surveillance/",
      "last_updated": "2026-03-01",
      "framework_mapping": {
        "mit": "Privacy & Security",
        "eu_ai_act": "Fundamental rights, GDPR",
        "nist_ai_rmf": "Privacy-enhanced AI & data governance",
        "iso_42001": "Data governance & privacy controls"
      },
      "degree": 10
    },
    {
      "id": "domain:DOM-SEC",
      "type": "domain",
      "domain_code": "DOM-SEC",
      "title": "Security & Cyber Threats",
      "slug": "security-cyber",
      "definition": "AI-enabled attacks that compromise the integrity, confidentiality, or availability of digital systems — through input manipulation, model exploitation, or automated offense.",
      "url": "https://topaithreats.com/domains/security-cyber/",
      "last_updated": "2026-02-28",
      "framework_mapping": {
        "mit": "Privacy & Security",
        "eu_ai_act": "Cybersecurity & Robustness",
        "nist_ai_rmf": "Govern, Map, Manage — resilience & robustness",
        "iso_42001": "Security controls for AI systems"
      },
      "degree": 18
    },
    {
      "id": "domain:DOM-SOC",
      "type": "domain",
      "domain_code": "DOM-SOC",
      "title": "Discrimination & Social Harm",
      "slug": "discrimination-social-harm",
      "definition": "Threats that result in unfair treatment, exclusion, or social harm to individuals or groups.",
      "url": "https://topaithreats.com/domains/discrimination-social-harm/",
      "last_updated": "2026-03-01",
      "framework_mapping": {
        "mit": "Discrimination & Toxicity",
        "eu_ai_act": "High-risk systems (employment, credit, education)",
        "nist_ai_rmf": "Fairness & bias management",
        "iso_42001": "Non-discrimination & impact assessment"
      },
      "degree": 10
    },
    {
      "id": "domain:DOM-SYS",
      "type": "domain",
      "domain_code": "DOM-SYS",
      "title": "Systemic & Catastrophic Risks",
      "slug": "systemic-catastrophic",
      "definition": "Threats that emerge from scale, coupling, and accumulation rather than single failures.",
      "url": "https://topaithreats.com/domains/systemic-catastrophic/",
      "last_updated": "2026-03-01",
      "framework_mapping": {
        "mit": "Long-term / existential",
        "eu_ai_act": "Systemic risk framing (2026+)",
        "nist_ai_rmf": "Safety & systemic risk assessment",
        "iso_42001": "Organizational risk governance"
      },
      "degree": 12
    },
    {
      "id": "sub:DOM-AGT-agent-to-agent-propagation",
      "type": "sub_category",
      "domain_code": "DOM-AGT",
      "pattern_code": "PAT-AGT-001",
      "title": "Agent-to-Agent Propagation",
      "slug": "agent-to-agent-propagation",
      "definition": "Harmful behaviors, errors, or malicious instructions that spread between interconnected AI agents, amplifying damage beyond the originating system.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/agent-to-agent-propagation/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance"
      ],
      "degree": 15
    },
    {
      "id": "sub:DOM-AGT-cascading-hallucinations",
      "type": "sub_category",
      "domain_code": "DOM-AGT",
      "pattern_code": "PAT-AGT-002",
      "title": "Cascading Hallucinations",
      "slug": "cascading-hallucinations",
      "definition": "AI-generated false information that propagates through chains of AI systems, with each system treating the previous system's hallucinated output as authoritative input.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/cascading-hallucinations/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "media",
        "finance"
      ],
      "degree": 12
    },
    {
      "id": "sub:DOM-AGT-goal-drift",
      "type": "sub_category",
      "domain_code": "DOM-AGT",
      "pattern_code": "PAT-AGT-003",
      "title": "Goal Drift",
      "slug": "goal-drift",
      "definition": "AI agents that gradually deviate from their intended objectives over time, pursuing emergent sub-goals or optimizing for proxy metrics that diverge from human intent.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/goal-drift/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance"
      ],
      "degree": 25
    },
    {
      "id": "sub:DOM-AGT-memory-poisoning",
      "type": "sub_category",
      "domain_code": "DOM-AGT",
      "pattern_code": "PAT-AGT-004",
      "title": "Memory Poisoning",
      "slug": "memory-poisoning",
      "definition": "Attacks or failures that corrupt an AI agent's persistent memory, context, or learned preferences, causing it to act on false information or compromised instructions across sessions.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/memory-poisoning/",
      "last_updated": "2026-03-07",
      "sectors_affected": [
        "cross-sector"
      ],
      "degree": 20
    },
    {
      "id": "sub:DOM-AGT-multi-agent-coordination-failures",
      "type": "sub_category",
      "domain_code": "DOM-AGT",
      "pattern_code": "PAT-AGT-005",
      "title": "Multi-Agent Coordination Failures",
      "slug": "multi-agent-coordination-failures",
      "definition": "Harmful outcomes arising when multiple AI agents interact in unexpected ways, creating emergent behaviors that none were individually designed to produce.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/multi-agent-coordination-failures/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "government"
      ],
      "degree": 15
    },
    {
      "id": "sub:DOM-AGT-specification-gaming",
      "type": "sub_category",
      "domain_code": "DOM-AGT",
      "pattern_code": "PAT-AGT-007",
      "title": "Specification Gaming: How AI Agents Cheat Their Objectives",
      "slug": "specification-gaming",
      "definition": "When AI agents exploit loopholes in their objectives instead of doing what designers intended. Examples from RL to LLM agents, detection, and prevention.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/specification-gaming/",
      "last_updated": "2026-04-09",
      "sectors_affected": [
        "cross-sector",
        "finance"
      ],
      "degree": 19
    },
    {
      "id": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "type": "sub_category",
      "domain_code": "DOM-AGT",
      "pattern_code": "PAT-AGT-006",
      "title": "Tool Misuse & Privilege Escalation",
      "slug": "tool-misuse-privilege-escalation",
      "definition": "AI agents that exceed their intended permissions, misuse available tools, or escalate their own privileges to accomplish goals beyond their authorized scope.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/tool-misuse-privilege-escalation/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "government"
      ],
      "degree": 30
    },
    {
      "id": "sub:DOM-SOC-algorithmic-amplification",
      "type": "sub_category",
      "domain_code": "DOM-SOC",
      "pattern_code": "PAT-SOC-001",
      "title": "Algorithmic Amplification",
      "slug": "algorithmic-amplification",
      "definition": "AI recommendation and ranking systems that disproportionately amplify harmful, divisive, or extremist content due to optimization for engagement metrics.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/algorithmic-amplification/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "media",
        "education"
      ],
      "degree": 13
    },
    {
      "id": "sub:DOM-SOC-allocational-harm",
      "type": "sub_category",
      "domain_code": "DOM-SOC",
      "pattern_code": "PAT-SOC-002",
      "title": "Allocational Harm",
      "slug": "allocational-harm",
      "definition": "AI systems that unfairly distribute or withhold resources, opportunities, or services based on group membership or protected characteristics.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/allocational-harm/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "finance",
        "healthcare",
        "education",
        "government"
      ],
      "degree": 27
    },
    {
      "id": "sub:DOM-SOC-data-imbalance-bias",
      "type": "sub_category",
      "domain_code": "DOM-SOC",
      "pattern_code": "PAT-SOC-003",
      "title": "Data Imbalance Bias",
      "slug": "data-imbalance-bias",
      "definition": "Systematic biases in AI model outputs resulting from unrepresentative, incomplete, or historically skewed training data.",
      "severity": "high",
      "likelihood": "stable",
      "url": "https://topaithreats.com/patterns/data-imbalance-bias/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "healthcare",
        "finance",
        "government"
      ],
      "degree": 17
    },
    {
      "id": "sub:DOM-SOC-proxy-discrimination",
      "type": "sub_category",
      "domain_code": "DOM-SOC",
      "pattern_code": "PAT-SOC-004",
      "title": "Proxy Discrimination",
      "slug": "proxy-discrimination",
      "definition": "AI systems that discriminate based on protected characteristics by using correlated proxy variables—such as zip code, name, or browsing history—as substitutes.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/proxy-discrimination/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "finance",
        "healthcare",
        "government",
        "retail"
      ],
      "degree": 20
    },
    {
      "id": "sub:DOM-SOC-representational-harm",
      "type": "sub_category",
      "domain_code": "DOM-SOC",
      "pattern_code": "PAT-SOC-005",
      "title": "Representational Harm",
      "slug": "representational-harm",
      "definition": "AI systems that generate or reinforce stereotypes, demeaning portrayals, or erasure of specific groups in their outputs.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/representational-harm/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "media",
        "education"
      ],
      "degree": 19
    },
    {
      "id": "sub:DOM-ECO-automation-induced-job-degradation",
      "type": "sub_category",
      "domain_code": "DOM-ECO",
      "pattern_code": "PAT-ECO-001",
      "title": "Automation-Induced Job Degradation",
      "slug": "automation-induced-job-degradation",
      "definition": "AI-driven automation that eliminates roles, deskills workers, or degrades employment conditions without adequate transition support.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/automation-induced-job-degradation/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "manufacturing",
        "retail"
      ],
      "degree": 14
    },
    {
      "id": "sub:DOM-ECO-decision-loop-automation",
      "type": "sub_category",
      "domain_code": "DOM-ECO",
      "pattern_code": "PAT-ECO-002",
      "title": "Decision Loop Automation",
      "slug": "decision-loop-automation",
      "definition": "AI systems that autonomously execute consequential decisions in rapid feedback loops, operating faster than human oversight can meaningfully intervene.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/decision-loop-automation/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "finance",
        "government",
        "healthcare"
      ],
      "degree": 16
    },
    {
      "id": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "type": "sub_category",
      "domain_code": "DOM-ECO",
      "pattern_code": "PAT-ECO-003",
      "title": "Economic Dependency on Black-Box Systems",
      "slug": "economic-dependency-on-black-box-systems",
      "definition": "Critical economic functions—such as credit scoring, insurance underwriting, and supply chain management—becoming dependent on opaque AI systems whose decision logic cannot be audited or understood.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/economic-dependency-on-black-box-systems/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "finance",
        "healthcare",
        "government"
      ],
      "degree": 13
    },
    {
      "id": "sub:DOM-ECO-market-manipulation-via-ai",
      "type": "sub_category",
      "domain_code": "DOM-ECO",
      "pattern_code": "PAT-ECO-004",
      "title": "Market Manipulation via AI",
      "slug": "market-manipulation-via-ai",
      "definition": "AI systems used to manipulate financial markets, pricing mechanisms, or competitive dynamics through automated trading, price-fixing, or demand manipulation.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/market-manipulation-via-ai/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "finance",
        "retail"
      ],
      "degree": 11
    },
    {
      "id": "sub:DOM-ECO-power-data-concentration",
      "type": "sub_category",
      "domain_code": "DOM-ECO",
      "pattern_code": "PAT-ECO-005",
      "title": "Power & Data Concentration",
      "slug": "power-data-concentration",
      "definition": "The consolidation of economic power and data assets among a small number of AI-capable organizations, creating barriers to competition and innovation.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/power-data-concentration/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance"
      ],
      "degree": 18
    },
    {
      "id": "sub:DOM-CTL-overreliance-automation-bias",
      "type": "sub_category",
      "domain_code": "DOM-CTL",
      "pattern_code": "PAT-CTL-004",
      "title": "Automation Bias in AI: Definition, Examples, and Prevention",
      "slug": "overreliance-automation-bias",
      "definition": "Why humans overtrust AI systems — documented across healthcare, justice, and aviation. Detection indicators, prevention measures, and 10+ real-world incident examples.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/overreliance-automation-bias/",
      "last_updated": "2026-04-09",
      "sectors_affected": [
        "cross-sector",
        "healthcare",
        "government",
        "finance"
      ],
      "degree": 44
    },
    {
      "id": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "type": "sub_category",
      "domain_code": "DOM-CTL",
      "pattern_code": "PAT-CTL-001",
      "title": "Deceptive or Manipulative Interfaces",
      "slug": "deceptive-manipulative-interfaces",
      "definition": "AI-powered user interfaces that employ dark patterns, emotional manipulation, or deceptive design to influence user behavior against their interests.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/deceptive-manipulative-interfaces/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "retail",
        "media"
      ],
      "degree": 22
    },
    {
      "id": "sub:DOM-CTL-implicit-authority-transfer",
      "type": "sub_category",
      "domain_code": "DOM-CTL",
      "pattern_code": "PAT-CTL-002",
      "title": "Implicit Authority Transfer",
      "slug": "implicit-authority-transfer",
      "definition": "The gradual, often unrecognized shift of decision-making authority from humans to AI systems, occurring without explicit delegation or institutional awareness.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/implicit-authority-transfer/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "government",
        "healthcare",
        "finance"
      ],
      "degree": 14
    },
    {
      "id": "sub:DOM-CTL-loss-of-human-agency",
      "type": "sub_category",
      "domain_code": "DOM-CTL",
      "pattern_code": "PAT-CTL-003",
      "title": "Loss of Human Agency",
      "slug": "loss-of-human-agency",
      "definition": "AI systems that progressively reduce individuals' ability to make autonomous decisions, exercise free choice, or meaningfully participate in processes that affect them.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/loss-of-human-agency/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "healthcare",
        "government"
      ],
      "degree": 20
    },
    {
      "id": "sub:DOM-CTL-safety-governance-override",
      "type": "sub_category",
      "domain_code": "DOM-CTL",
      "pattern_code": "PAT-CTL-006",
      "title": "Safety Governance Override",
      "slug": "safety-governance-override",
      "definition": "Instances where a formal safety process, advisory body, or governance structure existed and was specifically bypassed, dismantled, or overruled by leadership. Distinct from general negligence: this pattern requires evidence of an existing safety mechanism that was overridden.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/safety-governance-override/",
      "last_updated": "2026-04-06",
      "sectors_affected": [
        "technology",
        "government",
        "regulation",
        "cross-sector"
      ],
      "degree": 16
    },
    {
      "id": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "type": "sub_category",
      "domain_code": "DOM-CTL",
      "pattern_code": "PAT-CTL-005",
      "title": "Unsafe Human-in-the-Loop Failures",
      "slug": "unsafe-human-in-the-loop-failures",
      "definition": "Situations where human oversight mechanisms in AI systems fail to function as intended, due to alert fatigue, inadequate training, time pressure, or system design that makes meaningful intervention impractical.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/unsafe-human-in-the-loop-failures/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "healthcare",
        "government",
        "finance"
      ],
      "degree": 23
    },
    {
      "id": "sub:DOM-INF-ai-enabled-fraud",
      "type": "sub_category",
      "domain_code": "DOM-INF",
      "pattern_code": "PAT-INF-006",
      "title": "AI-Enabled Fraud",
      "slug": "ai-enabled-fraud",
      "definition": "The use of generative AI — synthetic identities, deepfake video, cloned voices, and AI-generated documents — as the primary instrument of financial fraud, enabling synthetic identity creation, wire transfer authorization through executive impersonation, invoice fabrication, and KYC bypass at scale and quality levels that defeat traditional fraud detection.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/ai-enabled-fraud/",
      "last_updated": "2026-03-22",
      "sectors_affected": [
        "cross-sector",
        "finance"
      ],
      "degree": 22
    },
    {
      "id": "sub:DOM-INF-consensus-reality-erosion",
      "type": "sub_category",
      "domain_code": "DOM-INF",
      "pattern_code": "PAT-INF-001",
      "title": "Consensus Reality Erosion",
      "slug": "consensus-reality-erosion",
      "definition": "The gradual undermining of shared understanding of facts and reality through pervasive AI-generated content that blurs the boundary between authentic and synthetic information.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/consensus-reality-erosion/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "media",
        "government",
        "education"
      ],
      "degree": 14
    },
    {
      "id": "sub:DOM-INF-deepfake-identity-hijacking",
      "type": "sub_category",
      "domain_code": "DOM-INF",
      "pattern_code": "PAT-INF-002",
      "title": "Deepfake Identity Hijacking",
      "slug": "deepfake-identity-hijacking",
      "definition": "The use of AI-generated synthetic media to impersonate real individuals for fraudulent, manipulative, or harmful purposes.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/deepfake-identity-hijacking/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "government",
        "media"
      ],
      "degree": 35
    },
    {
      "id": "sub:DOM-INF-disinformation-campaigns",
      "type": "sub_category",
      "domain_code": "DOM-INF",
      "pattern_code": "PAT-INF-003",
      "title": "Disinformation Campaigns",
      "slug": "disinformation-campaigns",
      "definition": "Coordinated use of AI to deliberately create, amplify, or distribute false information at scale for strategic purposes.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/disinformation-campaigns/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "government",
        "media"
      ],
      "degree": 22
    },
    {
      "id": "sub:DOM-INF-misinformation-hallucinated-content",
      "type": "sub_category",
      "domain_code": "DOM-INF",
      "pattern_code": "PAT-INF-004",
      "title": "Misinformation & Hallucinated Content",
      "slug": "misinformation-hallucinated-content",
      "definition": "False information generated or spread by AI systems without deliberate intent to deceive, including AI hallucinations and confabulations.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/misinformation-hallucinated-content/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "media",
        "education"
      ],
      "degree": 27
    },
    {
      "id": "sub:DOM-INF-synthetic-media-manipulation",
      "type": "sub_category",
      "domain_code": "DOM-INF",
      "pattern_code": "PAT-INF-005",
      "title": "Synthetic Media Manipulation",
      "slug": "synthetic-media-manipulation",
      "definition": "AI-enabled alteration of authentic images, audio, or video to misrepresent reality, distinct from full deepfake generation.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/synthetic-media-manipulation/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "media",
        "legal"
      ],
      "degree": 19
    },
    {
      "id": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "type": "sub_category",
      "domain_code": "DOM-PRI",
      "pattern_code": "PAT-PRI-001",
      "title": "Behavioral Profiling Without Consent",
      "slug": "behavioral-profiling-without-consent",
      "definition": "AI systems that construct detailed behavioral profiles of individuals—tracking patterns of movement, consumption, communication, and online activity—without informed consent.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/behavioral-profiling-without-consent/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "retail",
        "media"
      ],
      "degree": 22
    },
    {
      "id": "sub:DOM-PRI-biometric-exploitation",
      "type": "sub_category",
      "domain_code": "DOM-PRI",
      "pattern_code": "PAT-PRI-002",
      "title": "Biometric Exploitation",
      "slug": "biometric-exploitation",
      "definition": "Misuse of AI-powered biometric systems—including facial recognition, voice analysis, and gait detection—to identify, track, or authenticate individuals without adequate consent or safeguards.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/biometric-exploitation/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "government",
        "finance"
      ],
      "degree": 23
    },
    {
      "id": "sub:DOM-PRI-mass-surveillance-amplification",
      "type": "sub_category",
      "domain_code": "DOM-PRI",
      "pattern_code": "PAT-PRI-003",
      "title": "Mass Surveillance Amplification",
      "slug": "mass-surveillance-amplification",
      "definition": "AI systems that dramatically expand the scale, efficiency, and intrusiveness of surveillance beyond what was previously possible with human monitoring alone.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/mass-surveillance-amplification/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "government"
      ],
      "degree": 19
    },
    {
      "id": "sub:DOM-PRI-re-identification-attacks",
      "type": "sub_category",
      "domain_code": "DOM-PRI",
      "pattern_code": "PAT-PRI-004",
      "title": "Re-identification Attacks",
      "slug": "re-identification-attacks",
      "definition": "AI techniques that link anonymized or pseudonymized data back to specific individuals, defeating privacy protections.",
      "severity": "high",
      "likelihood": "stable",
      "url": "https://topaithreats.com/patterns/re-identification-attacks/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "healthcare",
        "finance"
      ],
      "degree": 13
    },
    {
      "id": "sub:DOM-PRI-sensitive-attribute-inference",
      "type": "sub_category",
      "domain_code": "DOM-PRI",
      "pattern_code": "PAT-PRI-005",
      "title": "Sensitive Attribute Inference",
      "slug": "sensitive-attribute-inference",
      "definition": "AI systems that infer protected or sensitive personal attributes—such as sexual orientation, political views, health conditions, or religious beliefs—from seemingly non-sensitive data.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/sensitive-attribute-inference/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "healthcare",
        "government"
      ],
      "degree": 15
    },
    {
      "id": "sub:DOM-SEC-adversarial-evasion",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-001",
      "title": "Adversarial Evasion",
      "slug": "adversarial-evasion",
      "definition": "Techniques that manipulate AI model inputs to cause incorrect outputs, bypassing detection systems or security controls.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/adversarial-evasion/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "government"
      ],
      "degree": 27
    },
    {
      "id": "sub:DOM-SEC-ai-supply-chain-attack",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-008",
      "title": "AI Supply Chain Attack",
      "slug": "ai-supply-chain-attack",
      "definition": "Attacks that compromise AI systems by tampering with model weights, fine-tuning datasets, tool-server configurations, or software dependencies before deployment — embedding backdoors or vulnerabilities that propagate through the model distribution chain.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/ai-supply-chain-attack/",
      "last_updated": "2026-03-22",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "healthcare",
        "government"
      ],
      "degree": 19
    },
    {
      "id": "sub:DOM-SEC-ai-morphed-malware",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-002",
      "title": "AI-Morphed Malware",
      "slug": "ai-morphed-malware",
      "definition": "Malicious software that uses AI to adapt, evade detection, or generate novel attack variants autonomously.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/ai-morphed-malware/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "government",
        "healthcare"
      ],
      "degree": 19
    },
    {
      "id": "sub:DOM-SEC-social-engineering-via-ai",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-009",
      "title": "AI-Powered Social Engineering",
      "slug": "social-engineering-via-ai",
      "definition": "The use of generative AI — language models, voice cloning, and real-time deepfake video — to conduct social engineering attacks at unprecedented scale, personalization, and persuasive quality, targeting human trust to gain unauthorized access, credentials, or financial transfers.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/social-engineering-via-ai/",
      "last_updated": "2026-03-22",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "government"
      ],
      "degree": 22
    },
    {
      "id": "sub:DOM-SEC-automated-vulnerability-discovery",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-003",
      "title": "Automated Vulnerability Discovery",
      "slug": "automated-vulnerability-discovery",
      "definition": "AI systems that autonomously identify, analyze, and potentially exploit software and system vulnerabilities.",
      "severity": "medium",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/automated-vulnerability-discovery/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "government",
        "finance"
      ],
      "degree": 21
    },
    {
      "id": "sub:DOM-SEC-data-poisoning",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-004",
      "title": "Data Poisoning",
      "slug": "data-poisoning",
      "definition": "Deliberate corruption of training data to introduce biases, backdoors, or vulnerabilities into AI models.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/data-poisoning/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "healthcare"
      ],
      "degree": 16
    },
    {
      "id": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-007",
      "title": "Jailbreak & Guardrail Bypass",
      "slug": "jailbreak-guardrail-bypass",
      "definition": "Adversarial conversational techniques that manipulate LLMs into disabling or circumventing their safety constraints, producing outputs that alignment training was designed to prevent — from harmful content generation to policy-violating instructions.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/jailbreak-guardrail-bypass/",
      "last_updated": "2026-03-22",
      "sectors_affected": [
        "cross-sector",
        "government",
        "healthcare"
      ],
      "degree": 20
    },
    {
      "id": "sub:DOM-SEC-model-inversion-data-extraction",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-005",
      "title": "Model Inversion & Data Extraction",
      "slug": "model-inversion-data-extraction",
      "definition": "Attacks that extract private training data or sensitive information from AI models through targeted queries or analysis.",
      "severity": "high",
      "likelihood": "stable",
      "url": "https://topaithreats.com/patterns/model-inversion-data-extraction/",
      "last_updated": "2026-03-20",
      "sectors_affected": [
        "cross-sector",
        "healthcare",
        "finance"
      ],
      "degree": 26
    },
    {
      "id": "sub:DOM-SEC-prompt-injection-attack",
      "type": "sub_category",
      "domain_code": "DOM-SEC",
      "pattern_code": "PAT-SEC-006",
      "title": "Prompt Injection Attack",
      "slug": "prompt-injection-attack",
      "definition": "Adversarial inputs that override an AI system's intended instructions at runtime, causing it to execute attacker-controlled actions — from data exfiltration to unauthorized tool use — by exploiting the inability of LLMs to distinguish system instructions from user-supplied data.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/prompt-injection-attack/",
      "last_updated": "2026-03-22",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "government",
        "healthcare"
      ],
      "degree": 27
    },
    {
      "id": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "type": "sub_category",
      "domain_code": "DOM-SYS",
      "pattern_code": "PAT-SYS-001",
      "title": "Accumulative Risk & Trust Erosion",
      "slug": "accumulative-risk-trust-erosion",
      "definition": "The gradual degradation of public trust in institutions, information, and democratic processes as AI-related harms accumulate across multiple domains over time.",
      "severity": "high",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/accumulative-risk-trust-erosion/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "government",
        "media"
      ],
      "degree": 26
    },
    {
      "id": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "type": "sub_category",
      "domain_code": "DOM-SYS",
      "pattern_code": "PAT-SYS-002",
      "title": "AI-Assisted Biological Threat Design",
      "slug": "ai-assisted-biological-threat-design",
      "definition": "The use of AI systems to design, optimize, or lower the barrier to creating biological agents that pose threats to public health and biosecurity.",
      "severity": "critical",
      "likelihood": "stable",
      "url": "https://topaithreats.com/patterns/ai-assisted-biological-threat-design/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "healthcare",
        "government"
      ],
      "degree": 12
    },
    {
      "id": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "type": "sub_category",
      "domain_code": "DOM-SYS",
      "pattern_code": "PAT-SYS-003",
      "title": "Infrastructure Dependency Collapse",
      "slug": "infrastructure-dependency-collapse",
      "definition": "Cascading failures across critical systems when AI infrastructure—such as cloud services, foundation models, or data pipelines—experiences disruption or compromise.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/infrastructure-dependency-collapse/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "finance",
        "healthcare",
        "government"
      ],
      "degree": 16
    },
    {
      "id": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "type": "sub_category",
      "domain_code": "DOM-SYS",
      "pattern_code": "PAT-SYS-004",
      "title": "Lethal Autonomous Weapon Systems (LAWS)",
      "slug": "lethal-autonomous-weapon-systems",
      "definition": "Weapon systems that use AI to select and engage targets without meaningful human control, raising fundamental questions about accountability, international humanitarian law, and strategic stability.",
      "severity": "critical",
      "likelihood": "increasing",
      "url": "https://topaithreats.com/patterns/lethal-autonomous-weapon-systems/",
      "last_updated": "2026-04-13",
      "sectors_affected": [
        "government"
      ],
      "degree": 14
    },
    {
      "id": "sub:DOM-SYS-strategic-misalignment",
      "type": "sub_category",
      "domain_code": "DOM-SYS",
      "pattern_code": "PAT-SYS-005",
      "title": "Strategic Misalignment",
      "slug": "strategic-misalignment",
      "definition": "Situations where advanced AI systems pursue objectives that diverge from human values or intentions at a strategic level, potentially resulting in outcomes that are globally harmful even if locally optimal.",
      "severity": "high",
      "likelihood": "stable",
      "url": "https://topaithreats.com/patterns/strategic-misalignment/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector",
        "government"
      ],
      "degree": 18
    },
    {
      "id": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "type": "sub_category",
      "domain_code": "DOM-SYS",
      "pattern_code": "PAT-SYS-006",
      "title": "Uncontrolled Recursive Self-Improvement (Hypothetical)",
      "slug": "uncontrolled-recursive-self-improvement",
      "definition": "The theoretical scenario in which an AI system autonomously improves its own capabilities in a recursive cycle, potentially exceeding human ability to understand, predict, or control its behavior.",
      "severity": "low",
      "likelihood": "stable",
      "url": "https://topaithreats.com/patterns/uncontrolled-recursive-self-improvement/",
      "last_updated": "2025-01-15",
      "sectors_affected": [
        "cross-sector"
      ],
      "degree": 11
    },
    {
      "id": "incident:INC-26-0097",
      "type": "incident",
      "ait_id": "INC-26-0097",
      "title": "Oracle Cuts 20,000–30,000 Jobs to Fund $50B AI Infrastructure Push (2026)",
      "slug": "oracle-ai-infrastructure-mass-layoffs",
      "url": "https://topaithreats.com/incidents/INC-26-0097-oracle-ai-infrastructure-mass-layoffs/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-31",
      "last_updated": "2026-04-09",
      "regions": [
        "north-america",
        "asia",
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "workers",
        "society-at-large"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "organizational-leaders",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "US employees were offered four weeks of base salary plus one additional week per year of service, capped at 26 weeks. Tenure is calculated from the most recent hire date, disadvantaging employees who joined through acquisitions. Unvested RSUs were forfeited immediately upon termination. Indian employees were offered 15 days base salary per year of service plus an ex gratia payment of 15 days per year, a fixed 2 months' salary, 1 month of gardening leave salary, and unused leave encashment. Potential WARN Act violations are under scrutiny: if Oracle did not provide the required 60-day advance notice at qualifying sites, affected employees may be owed 60 days of back pay on top of severance.",
        "regulatory_action": "WARN Act compliance under scrutiny at qualifying sites in Washington, Missouri, and California; no enforcement action filed as of April 2026. No other regulatory challenge to the layoffs."
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0074",
      "type": "incident",
      "ait_id": "INC-26-0074",
      "title": "Claude Mythos Model Leak — CMS Error Exposes Draft Blog Describing 'Unprecedented Cybersecurity Risks'",
      "slug": "claude-mythos-model-leak",
      "url": "https://topaithreats.com/incidents/INC-26-0074-claude-mythos-model-leak/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-27",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "CMS configuration error fixed; exposed assets secured",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0015",
      "type": "incident",
      "ait_id": "INC-26-0015",
      "title": "TeamPCP Compromises LiteLLM via Poisoned Trivy Security Scanner",
      "slug": "litellm-supply-chain-compromise",
      "url": "https://topaithreats.com/incidents/INC-26-0015-litellm-supply-chain-compromise/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03-24",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe",
        "asia"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations",
        "critical-infrastructure-operators"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "adversarial-attack",
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "Compromised packages removed from PyPI; maintainer credentials rotated; new authorized maintainers established; releases paused pending supply chain review",
        "other": "Google Mandiant engaged for forensic analysis; nine major AI projects (DSPy, MLflow, OpenHands, CrewAI) filed security patches"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0059",
      "type": "incident",
      "ait_id": "INC-26-0059",
      "title": "OpenAI Shuts Down Sora Video Generator — Celebrity Deepfakes and $15M/Day Losses",
      "slug": "sora-shutdown-deepfakes-losses",
      "url": "https://topaithreats.com/incidents/INC-26-0059-sora-shutdown-deepfakes-losses/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-24",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "competitive-pressure",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "generative-image-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "Sora shut down on March 24, 2026",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0094",
      "type": "incident",
      "ait_id": "INC-26-0094",
      "title": "White House AI Framework Calls on Congress to Preempt State AI Laws, Leverages Federal Funding",
      "slug": "trump-ai-state-law-preemption",
      "url": "https://topaithreats.com/incidents/INC-26-0094-trump-ai-state-law-preemption/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-03-20",
      "last_updated": "2026-04-06",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "regulation",
        "technology"
      ],
      "affected_groups": [
        "government-institutions",
        "democratic-institutions",
        "society-at-large"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "competitive-pressure"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "Governors of California, Colorado, and New York issued statements that the executive order would not stop them from passing or enforcing local AI statutes. A bipartisan GUARDRAILS Act was introduced to repeal the executive order (not yet enacted at time of writing).",
        "regulatory_action": "A DOJ AI Litigation Task Force was established to challenge state AI laws in federal court. The Commerce Department was directed to publish a comprehensive evaluation of state AI laws, identifying 'onerous' ones."
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0043",
      "type": "incident",
      "ait_id": "INC-26-0043",
      "title": "Meta Internal AI Agent Causes Sev-1 Data Exposure and VP Agent Mass-Deletes Emails Ignoring Stop Commands",
      "slug": "meta-sev1-ai-agent-data-exposure",
      "url": "https://topaithreats.com/incidents/INC-26-0043-meta-sev1-ai-agent-data-exposure/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-18",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "inadequate-human-oversight",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "recovery": "Access controls restored after 2 hours; deleted emails status unknown",
        "regulatory_action": ""
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0065",
      "type": "incident",
      "ait_id": "INC-26-0065",
      "title": "Danny Bones — First AI Slopaganda Influencer Funded by Political Party (UK)",
      "slug": "danny-bones-ai-slopaganda",
      "url": "https://topaithreats.com/incidents/INC-26-0065-danny-bones-ai-slopaganda/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-12",
      "last_updated": "2026-03-29",
      "regions": [
        "united-kingdom"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "platform-manipulation",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "generative-image-models",
        "voice-synthesis",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0047",
      "type": "incident",
      "ait_id": "INC-26-0047",
      "title": "Federal Judge Orders UnitedHealth to Disclose nH Predict AI Denial Algorithm with Alleged 90% Error Rate",
      "slug": "unitedhealth-nh-predict-court-order",
      "url": "https://topaithreats.com/incidents/INC-26-0047-unitedhealth-nh-predict-court-order/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03-09",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare",
        "legal"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "competitive-pressure",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "decision-automation",
        "training-datasets"
      ],
      "outcomes": {
        "recovery": "Disclosure proceedings ongoing",
        "regulatory_action": "Federal court order for algorithm disclosure"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0072",
      "type": "incident",
      "ait_id": "INC-26-0072",
      "title": "Operation Alice — 373K Dark Web CSAM Sites Taken Down Across 23 Countries",
      "slug": "operation-alice-csam-takedown",
      "url": "https://topaithreats.com/incidents/INC-26-0072-operation-alice-csam-takedown/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-03-09",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "children"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected",
        "regulators-public-servants"
      ],
      "impact_level": "global",
      "causal_factors": [
        "weaponization",
        "platform-manipulation"
      ],
      "assets_involved": [
        "generative-image-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "Infrastructure dismantled",
        "regulatory_action": "373K sites taken down; 287 servers seized; 440 users identified"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-26-0091",
      "type": "incident",
      "ait_id": "INC-26-0091",
      "title": "Workday AI Hiring Bias Class Action — African-American Applicant Rejected Dozens of Times Across Employers",
      "slug": "workday-ai-hiring-bias-class-action",
      "url": "https://topaithreats.com/incidents/INC-26-0091-workday-ai-hiring-bias-class-action/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-07",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "workers",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Class action filed; Title VII, ADEA, ADA claims"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0095",
      "type": "incident",
      "ait_id": "INC-26-0095",
      "title": "OpenAI Robotics Lead Resigns Over Pentagon Deal, Citing Surveillance and Lethal Autonomy Concerns",
      "slug": "openai-kalinowski-resignation",
      "url": "https://topaithreats.com/incidents/INC-26-0095-openai-kalinowski-resignation/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-03-07",
      "last_updated": "2026-04-06",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "outcomes": {
        "recovery": "OpenAI stated it maintained 'red lines' against domestic surveillance and autonomous weapons. Sam Altman reportedly acknowledged the contract was 'opportunistic and sloppy' and renegotiated terms."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0042",
      "type": "incident",
      "ait_id": "INC-26-0042",
      "title": "North Korean IT Worker Deepfake Fraud Network Generates $500M Annually for WMD Programs — OFAC Sanctions Imposed",
      "slug": "dprk-deepfake-it-worker-fraud",
      "url": "https://topaithreats.com/incidents/INC-26-0042-dprk-deepfake-it-worker-fraud/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe",
        "asia"
      ],
      "sectors": [
        "technology",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering",
        "weaponization"
      ],
      "assets_involved": [
        "generative-image-models",
        "voice-synthesis",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "OFAC sanctions imposed on network operators"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0051",
      "type": "incident",
      "ait_id": "INC-26-0051",
      "title": "Judge Orders OpenAI to Disclose 20 Million Chat Logs as Copyright Litigation Escalates",
      "slug": "openai-copyright-chat-logs-ordered",
      "url": "https://topaithreats.com/incidents/INC-26-0051-openai-copyright-chat-logs-ordered/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "media",
        "legal"
      ],
      "affected_groups": [
        "workers",
        "business-organizations"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "accountability-vacuum",
        "competitive-pressure"
      ],
      "assets_involved": [
        "training-datasets",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Court-ordered disclosure of 20M chat logs; multiple new copyright lawsuits filed"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0066",
      "type": "incident",
      "ait_id": "INC-26-0066",
      "title": "ACLU Files Complaint — HireVue AI Discriminated Against Deaf Indigenous Worker in Promotion Decision",
      "slug": "hirevue-deaf-indigenous-discrimination",
      "url": "https://topaithreats.com/incidents/INC-26-0066-hirevue-deaf-indigenous-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Complaint filed with Colorado CCRD and EEOC"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0075",
      "type": "incident",
      "ait_id": "INC-26-0075",
      "title": "Canada Immigration AI Hallucinated Job Duties — PhD Immunologist Denied Permanent Residency",
      "slug": "canada-immigration-ai-hallucinated-duties",
      "url": "https://topaithreats.com/incidents/INC-26-0075-canada-immigration-ai-hallucinated-duties/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "canada"
      ],
      "sectors": [
        "government"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "over-automation",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "First documented IRCC acknowledgment of AI in immigration decisions"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0086",
      "type": "incident",
      "ait_id": "INC-26-0086",
      "title": "North Korea 'AI Fake Applicant' Campaign — Deepfake Video Interviews to Infiltrate Western Companies",
      "slug": "nk-deepfake-job-interviews",
      "url": "https://topaithreats.com/incidents/INC-26-0086-nk-deepfake-job-interviews/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global",
        "north-america"
      ],
      "sectors": [
        "technology",
        "employment"
      ],
      "affected_groups": [
        "business-organizations",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "weaponization",
        "social-engineering"
      ],
      "assets_involved": [
        "generative-image-models",
        "voice-synthesis",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "DOJ enforcement: 29 searches, 29 account seizures"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0087",
      "type": "incident",
      "ait_id": "INC-26-0087",
      "title": "Context Hub Documentation Poisoning — AI Coding Assistants Write Malicious Code 100% of Time from Poisoned Docs",
      "slug": "context-hub-documentation-poisoning",
      "url": "https://topaithreats.com/incidents/INC-26-0087-context-hub-documentation-poisoning/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "prompt-injection-vulnerability"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0089",
      "type": "incident",
      "ait_id": "INC-26-0089",
      "title": "Claude Code 'Claudy Day' Vulnerability Chain — Silent Data Exfiltration via Prompt Injection",
      "slug": "claude-claudy-day-vulnerabilities",
      "url": "https://topaithreats.com/incidents/INC-26-0089-claude-claudy-day-vulnerabilities/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "Vulnerability patched after disclosure",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0096",
      "type": "incident",
      "ait_id": "INC-26-0096",
      "title": "Alibaba ROME AI Agent Autonomously Mines Cryptocurrency and Opens SSH Tunnel",
      "slug": "alibaba-rome-agent-crypto-mining",
      "url": "https://topaithreats.com/incidents/INC-26-0096-alibaba-rome-agent-crypto-mining/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-04-07",
      "regions": [
        "asia",
        "china"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "financial_loss": "Undisclosed (GPU compute costs diverted to unauthorized mining)",
        "regulatory_action": "None reported",
        "other": "Incident documented in peer-reviewed research paper; behaviors halted by existing security infrastructure"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0029",
      "type": "incident",
      "ait_id": "INC-26-0029",
      "title": "US Military AI Targeting Platform Fed Stale Data Contributes to Strike on Iranian Elementary School",
      "slug": "us-military-ai-targeting-school-strike",
      "url": "https://topaithreats.com/incidents/INC-26-0029-us-military-ai-targeting-school-strike/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-28",
      "last_updated": "2026-04-02",
      "regions": [
        "middle-east"
      ],
      "sectors": [
        "government",
        "education"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "inadequate-human-oversight",
        "over-automation",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "decision-automation",
        "foundation-models"
      ],
      "outcomes": {
        "recovery": "Irreversible harm; no restitution has been announced. The Pentagon has not publicly announced changes to Project Maven targeting protocols or Civilian Protection Center staffing levels in response to the strike as of April 2026.",
        "regulatory_action": "Human Rights Watch and Amnesty International launched formal investigations and called for binding international protocols on AI-assisted military targeting. Multiple UN member states cited the incident in renewed calls for a moratorium on autonomous weapons at the Convention on Certain Conventional Weapons. No US domestic regulatory or legislative action taken as of April 2026."
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0027",
      "type": "incident",
      "ait_id": "INC-26-0027",
      "title": "Block (Square) Cuts Approximately 4,000 Jobs as AI Replaces Customer Service Workforce",
      "slug": "block-ai-mass-layoffs",
      "url": "https://topaithreats.com/incidents/INC-26-0027-block-ai-mass-layoffs/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-26",
      "last_updated": "2026-04-02",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "finance"
      ],
      "affected_groups": [
        "workers",
        "society-at-large"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "over-automation"
      ],
      "assets_involved": [
        "chatbots",
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "Block offered severance of 20 weeks base salary plus one additional week per year of tenure, equity vested through end of May, six months of healthcare coverage, corporate devices, and a $5,000 transition stipend. Reports indicate Block subsequently began quietly rehiring for some positions.",
        "regulatory_action": "No government agency challenged or investigated the AI capability claims underlying the layoffs as of April 2026. Congressional interest in mandatory impact assessments for AI-justified workforce reductions has been reported but no legislation introduced."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0092",
      "type": "incident",
      "ait_id": "INC-26-0092",
      "title": "Anthropic Removes Categorical Safety Pause Trigger from Responsible Scaling Policy",
      "slug": "anthropic-rsp-safety-pledge-dropped",
      "url": "https://topaithreats.com/incidents/INC-26-0092-anthropic-rsp-safety-pledge-dropped/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02-24",
      "last_updated": "2026-04-06",
      "regions": [
        "north-america",
        "global"
      ],
      "sectors": [
        "technology",
        "regulation"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "Anthropic stated it would publish Risk Reports every 3-6 months reviewed by third-party experts. The Frontier Safety Roadmaps are described as non-binding.",
        "regulatory_action": "Defense Secretary Hegseth designated Anthropic a 'supply chain risk to national security' on March 3, 2026, barring its use by the Pentagon."
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0003",
      "type": "incident",
      "ait_id": "INC-26-0003",
      "title": "Tesla Autopilot involved in 13 fatal crashes, US regulator finds",
      "slug": "tesla-autopilot-involved-in-13-fatal-crashes-us-regulator-fi",
      "url": "https://topaithreats.com/incidents/INC-26-0003-tesla-autopilot-involved-in-13-fatal-crashes-us-regulator-fi/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02-20",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation",
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "autonomous-agents",
        "industrial-control-systems"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "NHTSA three-year investigation concluded; second investigation opened into recall adequacy; probe upgraded to 3.2M vehicles"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0004",
      "type": "incident",
      "ait_id": "INC-26-0004",
      "title": "Individual jailed for online gambling fraud using stolen identities",
      "slug": "individual-jailed-for-online-gambling-fraud-using-stolen-ide",
      "url": "https://topaithreats.com/incidents/INC-26-0004-individual-jailed-for-online-gambling-fraud-using-stolen-ide/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-20",
      "last_updated": "2026-02-20",
      "regions": [
        "europe",
        "north-america",
        "oceania"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "identity-credentials",
        "biometric-data"
      ],
      "outcomes": {},
      "degree": 4
    },
    {
      "id": "incident:INC-26-0001",
      "type": "incident",
      "ait_id": "INC-26-0001",
      "title": "Disrupting malicious uses of AI: June 2025 | OpenAI",
      "slug": "disrupting-malicious-uses-of-ai-june-2025-openai",
      "url": "https://topaithreats.com/incidents/INC-26-0001-disrupting-malicious-uses-of-ai-june-2025-openai/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-18",
      "last_updated": "2026-03-29",
      "regions": [
        "unknown"
      ],
      "sectors": [
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "government-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "weaponization",
        "social-engineering"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "OpenAI disrupted and disclosed multiple threat actor operations"
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0032",
      "type": "incident",
      "ait_id": "INC-26-0032",
      "title": "OpenAI Dissolves Second Safety Team, Removes 'Safely' from Mission in IRS Filing, Restructures as Public Benefit Corporation",
      "slug": "openai-governance-safety-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0032-openai-governance-safety-crisis/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-11",
      "last_updated": "2026-04-03",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "No recovery measures announced. OpenAI has not reconstituted the Mission Alignment Team or restored 'safely' to its mission statement.",
        "regulatory_action": "California Attorney General Bonta demanded answers on the corporate restructuring; no formal enforcement action as of April 2026.",
        "legal_outcome": "Elon Musk trial began March 30, 2026, exploring whether OpenAI violated nonprofit governance obligations; Brockman diary entered as evidence."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0026",
      "type": "incident",
      "ait_id": "INC-26-0026",
      "title": "Tumbler Ridge Mass Shooting — ChatGPT Used in Attack Planning",
      "slug": "tumbler-ridge-mass-shooting-chatgpt",
      "url": "https://topaithreats.com/incidents/INC-26-0026-tumbler-ridge-mass-shooting-chatgpt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-10",
      "last_updated": "2026-04-02",
      "regions": [
        "canada"
      ],
      "sectors": [
        "public-safety",
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "large-language-models",
        "chatbots"
      ],
      "outcomes": {
        "recovery": "OpenAI CEO Sam Altman met with BC Premier David Eby and committed to reporting threats directly to the RCMP, retroactive review of previously flagged accounts, addition of mental health and behavioral experts to threat assessment, and broadened referral criteria no longer requiring target, means, and timing in the same conversation",
        "regulatory_action": "Canadian government examining mandatory 24-hour reporting requirements for AI companies detecting violent ideation; no legislation passed as of April 2026 (the AI and Data Act, Bill C-27, died when Parliament dissolved in 2025)",
        "legal_outcome": "Wrongful death lawsuit filed March 2026 by the mother of a critically injured 12-year-old student; case pending as of April 2026"
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0061",
      "type": "incident",
      "ait_id": "INC-26-0061",
      "title": "OpenClaw AI Agent Autonomously Retaliates Against Matplotlib Maintainer — First AI Retaliation Incident",
      "slug": "openclaw-matplotlib-retaliation",
      "url": "https://topaithreats.com/incidents/INC-26-0061-openclaw-matplotlib-retaliation/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-10",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "emergent-behavior",
        "insufficient-safety-testing",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "recovery": "Hit piece removed; agent's autonomous 'apology' published",
        "regulatory_action": ""
      },
      "degree": 2
    },
    {
      "id": "incident:INC-26-0025",
      "type": "incident",
      "ait_id": "INC-26-0025",
      "title": "Microsoft GRP-Obliteration: Single Prompt Reverses Safety Alignment Across 15 LLMs",
      "slug": "grp-obliteration-safety-reversal",
      "url": "https://topaithreats.com/incidents/INC-26-0025-grp-obliteration-safety-reversal/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-09",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "outcomes": {
        "other": "Research published as responsible disclosure to inform the AI safety community; no specific vendor patches announced"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-26-0058",
      "type": "incident",
      "ait_id": "INC-26-0058",
      "title": "Trump Shares Racist AI-Generated Deepfake of Obamas — Remains Online 12 Hours",
      "slug": "trump-deepfake-obamas",
      "url": "https://topaithreats.com/incidents/INC-26-0058-trump-deepfake-obamas/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-05",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "platform-manipulation"
      ],
      "assets_involved": [
        "generative-image-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "Video removed from Truth Social after approximately 12 hours",
        "regulatory_action": ""
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0078",
      "type": "incident",
      "ait_id": "INC-26-0078",
      "title": "International AI Safety Report 2026 — 100+ Experts Warn of Escalating Risks, Safeguards 'Will Likely Fail'",
      "slug": "international-ai-safety-report-2026",
      "url": "https://topaithreats.com/incidents/INC-26-0078-international-ai-safety-report-2026/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "cross-sector"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "insufficient-safety-testing",
        "competitive-pressure",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Report published as input to international policy discussions"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-26-0006",
      "type": "incident",
      "ait_id": "INC-26-0006",
      "title": "AI Recommendation Poisoning via 'Summarize with AI' Buttons (31 Companies)",
      "slug": "ai-recommendation-poisoning-summarize-buttons",
      "url": "https://topaithreats.com/incidents/INC-26-0006-ai-recommendation-poisoning-summarize-buttons/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-07",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "healthcare",
        "finance",
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "competitive-pressure",
        "regulatory-gap"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {},
      "degree": 5
    },
    {
      "id": "incident:INC-26-0007",
      "type": "incident",
      "ait_id": "INC-26-0007",
      "title": "Unit 42 Demonstrates Persistent Memory Injection in Amazon Bedrock Agents",
      "slug": "unit42-bedrock-agent-memory-injection-poc",
      "url": "https://topaithreats.com/incidents/INC-26-0007-unit42-bedrock-agent-memory-injection-poc/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-07",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {},
      "degree": 5
    },
    {
      "id": "incident:INC-26-0014",
      "type": "incident",
      "ait_id": "INC-26-0014",
      "title": "CodeWall AI Agent Breaches McKinsey Lilli Platform via SQL Injection",
      "slug": "mckinsey-lilli-ai-platform-breach",
      "url": "https://topaithreats.com/incidents/INC-26-0014-mckinsey-lilli-ai-platform-breach/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "business-organizations",
        "workers"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "McKinsey patched all identified endpoints within one day of disclosure",
        "other": "McKinsey stated no evidence that client data was accessed by unauthorized parties"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0016",
      "type": "incident",
      "ait_id": "INC-26-0016",
      "title": "Clinejection: Prompt Injection in Cline AI Bot Enables npm Supply Chain Attack",
      "slug": "clinejection-cline-supply-chain-attack",
      "url": "https://topaithreats.com/incidents/INC-26-0016-clinejection-cline-supply-chain-attack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "Cline deprecated malicious 2.3.0 and released clean 2.4.0; vulnerability assigned GHSA-9ppg-jx86-fqw7",
        "other": "Cline audit confirmed no malicious releases reached VS Code Marketplace or OpenVSX"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0021",
      "type": "incident",
      "ait_id": "INC-26-0021",
      "title": "ModelScope MS-Agent Shell Tool Command Injection Vulnerability",
      "slug": "ms-agent-shell-command-injection",
      "url": "https://topaithreats.com/incidents/INC-26-0021-ms-agent-shell-command-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "asia",
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "other": "No patch released as of March 2026; CERT/CC notes vendor status as 'Unknown'"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-26-0019",
      "type": "incident",
      "ait_id": "INC-26-0019",
      "title": "MCP TypeScript SDK Race Condition Leaks Data Across Client Boundaries",
      "slug": "mcp-sdk-cross-client-data-leak",
      "url": "https://topaithreats.com/incidents/INC-26-0019-mcp-sdk-cross-client-data-leak/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "recovery": "Fixed in @modelcontextprotocol/sdk version 1.26.0; recommended separate transport instances per client connection"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-26-0028",
      "type": "incident",
      "ait_id": "INC-26-0028",
      "title": "Anthropic Blacklisted by US Government After Refusing Autonomous Weapons and Mass Surveillance Contracts",
      "slug": "anthropic-pentagon-blacklisting-standoff",
      "url": "https://topaithreats.com/incidents/INC-26-0028-anthropic-pentagon-blacklisting-standoff/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-04-02",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "government-institutions",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants",
        "organizational-leaders"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "competitive-pressure",
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "outcomes": {
        "recovery": "Judicial injunction restored Anthropic's ability to serve federal clients pending full legal proceedings. The broader chilling effect on AI industry safety commitments remains an open concern.",
        "regulatory_action": "Executive order directed all federal agencies to cease Anthropic product usage within six months and designated Anthropic a 'supply chain risk'; order subsequently blocked by federal court injunction",
        "legal_outcome": "Anthropic filed suit against the Trump administration in March 2026; federal judge issued preliminary injunction blocking the blacklisting order on March 26, 2026, ruling it likely violated the First Amendment; case pending as of April 2026"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0034",
      "type": "incident",
      "ait_id": "INC-26-0034",
      "title": "OpenAI Pentagon Contract Triggers #QuitGPT Movement with 295% Uninstall Surge and 2.5 Million Participants",
      "slug": "openai-pentagon-quitgpt-revolt",
      "url": "https://topaithreats.com/incidents/INC-26-0034-openai-pentagon-quitgpt-revolt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america",
        "global"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "general-public",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "Claude temporarily reached #1 App Store position during the uninstall surge. As of April 2026, OpenAI has not withdrawn from Pentagon contracts.",
        "regulatory_action": "No government regulatory action taken as of 2026-04-03."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0036",
      "type": "incident",
      "ait_id": "INC-26-0036",
      "title": "MizarVision Chinese AI Startup Publishes Real-Time US Military Intelligence via Satellite Imagery",
      "slug": "mizarvision-us-military-intelligence-exposure",
      "url": "https://topaithreats.com/incidents/INC-26-0036-mizarvision-us-military-intelligence-exposure/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02",
      "last_updated": "2026-04-03",
      "regions": [
        "middle-east",
        "china",
        "north-america"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "national-security-systems",
        "government-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "regulatory-gap",
        "weaponization",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "foundation-models",
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "No public remediation by MizarVision. The published satellite intelligence remains accessible.",
        "regulatory_action": "Incident logged by OECD AI Incidents Monitor. No known enforcement action against MizarVision as of April 2026."
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0041",
      "type": "incident",
      "ait_id": "INC-26-0041",
      "title": "xAI Colossus Data Center Operates 27 Unpermitted Gas Turbines in Memphis While Consuming 1.3 Million Gallons of Water Daily",
      "slug": "xai-colossus-environmental-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0041-xai-colossus-environmental-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "energy"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "industrial-control-systems"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "EPA confirmed illegal power use; NAACP/SELC/Earthjustice lawsuit threatened; Durbin transparency legislation introduced"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-26-0070",
      "type": "incident",
      "ait_id": "INC-26-0070",
      "title": "Claude Safety Testing Reveals Extreme Self-Preservation Behavior Including Blackmail Suggestions",
      "slug": "claude-safety-test-blackmail-behavior",
      "url": "https://topaithreats.com/incidents/INC-26-0070-claude-safety-test-blackmail-behavior/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "emergent-behavior",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "outcomes": {
        "recovery": "Findings disclosed by Anthropic; safety mitigations under development",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0073",
      "type": "incident",
      "ait_id": "INC-26-0073",
      "title": "ChatGPT Ads Launch Triggers Researcher Resignation and Anthropic Counter-Marketing",
      "slug": "chatgpt-ads-launch-backlash",
      "url": "https://topaithreats.com/incidents/INC-26-0073-chatgpt-ads-launch-backlash/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure"
      ],
      "assets_involved": [
        "chatbots",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0040",
      "type": "incident",
      "ait_id": "INC-26-0040",
      "title": "Universal Music, Concord, and ABKCO Sue Anthropic for $3 Billion Over Alleged Training Data Piracy",
      "slug": "universal-music-anthropic-copyright-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-26-0040-universal-music-anthropic-copyright-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01-28",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "media",
        "legal"
      ],
      "affected_groups": [
        "workers",
        "business-organizations"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "training-datasets",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "As of April 2026, Anthropic has not filed a public response to the complaint or disclosed changes to its training data practices.",
        "regulatory_action": "No government regulatory action taken as of 2026-04-03.",
        "legal_outcome": "Universal Music, Concord, and ABKCO filed a $3 billion lawsuit against Anthropic. Case is pending judicial proceedings as of 2026-04-03."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0044",
      "type": "incident",
      "ait_id": "INC-26-0044",
      "title": "Waymo Robotaxi Strikes Child Near Elementary School in Santa Monica — NHTSA Investigation Opened",
      "slug": "waymo-robotaxi-strikes-child",
      "url": "https://topaithreats.com/incidents/INC-26-0044-waymo-robotaxi-strikes-child/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01-23",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation",
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "NHTSA investigation PE26001 opened"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0035",
      "type": "incident",
      "ait_id": "INC-26-0035",
      "title": "Grok AI Integrated into Pentagon Military Networks During CSAM Scandal",
      "slug": "grok-pentagon-military-deployment",
      "url": "https://topaithreats.com/incidents/INC-26-0035-grok-pentagon-military-deployment/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01-12",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "national-security-systems",
        "government-institutions"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "outcomes": {
        "recovery": "As of April 2026, there has been no public confirmation that the Pentagon has paused or reversed the Grok integration plan, despite the concurrent CSAM crisis and independent assessments of framework non-compliance.",
        "regulatory_action": "Senator Elizabeth Warren formally raised conflict-of-interest concerns. No congressional investigation opened as of 2026-04-03."
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0045",
      "type": "incident",
      "ait_id": "INC-26-0045",
      "title": "Character.AI Settles Five Teen Suicide Lawsuits as Kentucky Becomes First State to Sue",
      "slug": "character-ai-teen-suicide-legal-reckoning",
      "url": "https://topaithreats.com/incidents/INC-26-0045-character-ai-teen-suicide-legal-reckoning/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01-07",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "legal"
      ],
      "affected_groups": [
        "children",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-human-oversight",
        "emergent-behavior"
      ],
      "assets_involved": [
        "chatbots",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "Settlement terms undisclosed; no admission of liability",
        "regulatory_action": "Five federal lawsuits settled; Kentucky state lawsuit filed"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0005",
      "type": "incident",
      "ait_id": "INC-26-0005",
      "title": "AI impacting labor market like a tsunami as layoff fears mount",
      "slug": "ai-impacting-labor-market-like-a-tsunami-as-layoff-fears-mou",
      "url": "https://topaithreats.com/incidents/INC-26-0005-ai-impacting-labor-market-like-a-tsunami-as-layoff-fears-mou/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-02-20",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment",
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "workers",
        "business-organizations"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "organizational-leaders"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "outcomes": {},
      "degree": 2
    },
    {
      "id": "incident:INC-26-0010",
      "type": "incident",
      "ait_id": "INC-26-0010",
      "title": "New Zealand AI News Pages Flood Facebook with Rewritten Stories and Synthetic Images",
      "slug": "nz-news-hub-ai-rewritten-news",
      "url": "https://topaithreats.com/incidents/INC-26-0010-nz-news-hub-ai-rewritten-news/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-13",
      "regions": [
        "oceania"
      ],
      "sectors": [
        "media"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "accountability-vacuum",
        "weaponization"
      ],
      "assets_involved": [
        "large-language-models",
        "generative-image-models",
        "content-platforms"
      ],
      "outcomes": {},
      "degree": 5
    },
    {
      "id": "incident:INC-26-0013",
      "type": "incident",
      "ait_id": "INC-26-0013",
      "title": "OpenClaw AI Agent Platform Hit by Critical Vulnerability and Supply Chain Campaign",
      "slug": "openclaw-ai-agent-security-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0013-openclaw-ai-agent-security-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "asia",
        "europe"
      ],
      "sectors": [
        "technology",
        "corporate"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "autonomous-agents",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "Patch released in version 2026.1.29; malicious skills removed from ClawHub",
        "regulatory_action": "Multiple security advisories issued"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0017",
      "type": "incident",
      "ait_id": "INC-26-0017",
      "title": "Claude Code Remote Code Execution and API Key Exfiltration Vulnerabilities",
      "slug": "claude-code-rce-vulnerabilities",
      "url": "https://topaithreats.com/incidents/INC-26-0017-claude-code-rce-vulnerabilities/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "All three vulnerabilities patched by Anthropic; CVE-2026-25725 fixed in Claude Code version 2.1.2"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0020",
      "type": "incident",
      "ait_id": "INC-26-0020",
      "title": "AI-Generated Code Vulnerability Surge: 74 Confirmed CVEs Traced to Coding Assistants",
      "slug": "ai-generated-code-vulnerability-explosion",
      "url": "https://topaithreats.com/incidents/INC-26-0020-ai-generated-code-vulnerability-explosion/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "other": "Georgia Tech estimates the real total is 5 to 10 times higher (400-700 CVEs) due to detection blind spots where AI traces are stripped"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0023",
      "type": "incident",
      "ait_id": "INC-26-0023",
      "title": "Google Vertex AI Default Configurations Enable Privilege Escalation to Service Agent Roles",
      "slug": "google-vertex-ai-privilege-escalation",
      "url": "https://topaithreats.com/incidents/INC-26-0023-google-vertex-ai-privilege-escalation/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe",
        "asia"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "outcomes": {
        "other": "Google characterized the privilege escalation pathways as 'working as intended'; risks remain active in default deployments"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-26-0022",
      "type": "incident",
      "ait_id": "INC-26-0022",
      "title": "Cursor AI Code Editor Shell Built-In Allowlist Bypass Enables Zero-Click RCE",
      "slug": "cursor-ide-prompt-injection-rce",
      "url": "https://topaithreats.com/incidents/INC-26-0022-cursor-ide-prompt-injection-rce/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {
        "recovery": "CVE-2026-22708 fixed in Cursor version 2.3",
        "other": "Cursor's security guidelines now discourage reliance on allowlists as a security barrier"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0031",
      "type": "incident",
      "ait_id": "INC-26-0031",
      "title": "ChatGPT Adult Mode Planned Despite Unanimous Safety Advisor Opposition; Feature Paused After Backlash",
      "slug": "chatgpt-adult-mode-safety-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0031-chatgpt-adult-mode-safety-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-04-03",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "content-platforms",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "OpenAI indefinitely paused adult mode on March 26, 2026.",
        "regulatory_action": "No government regulatory action taken as of April 2026. The feature was paused by OpenAI voluntarily following external criticism."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-26-0046",
      "type": "incident",
      "ait_id": "INC-26-0046",
      "title": "LSU AI Cheating Detection Crisis — 1,488 Cases Filed with Disproportionate Impact on Non-Native English Speakers",
      "slug": "lsu-ai-cheating-detection-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0046-lsu-ai-cheating-detection-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "over-automation",
        "training-data-bias",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "decision-automation",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "693 cases remain open; student organization SAFAR formed to advocate for policy changes",
        "regulatory_action": ""
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0050",
      "type": "incident",
      "ait_id": "INC-26-0050",
      "title": "AI Healthcare Bias Study — 1.7 Million Responses Show Race-Based Treatment Differences Across 9 AI Programs",
      "slug": "ai-healthcare-bias-race-based-treatment",
      "url": "https://topaithreats.com/incidents/INC-26-0050-ai-healthcare-bias-race-based-treatment/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets",
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0052",
      "type": "incident",
      "ait_id": "INC-26-0052",
      "title": "ICE Deploys Warrantless AI Surveillance Combining Palantir, Clearview, Iris Scanning, and Phone Hacking",
      "slug": "ice-warrantless-ai-surveillance",
      "url": "https://topaithreats.com/incidents/INC-26-0052-ice-warrantless-ai-surveillance/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "biometric-data",
        "content-platforms",
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "130+ organizations petitioned Congress; no legislation enacted"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-26-0055",
      "type": "incident",
      "ait_id": "INC-26-0055",
      "title": "Perplexity Comet AI Browser Enables Zero-Click Credential Theft via Prompt Injection",
      "slug": "perplexity-comet-credential-theft",
      "url": "https://topaithreats.com/incidents/INC-26-0055-perplexity-comet-credential-theft/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "Two patches released; second patch addressed initial bypass",
        "regulatory_action": ""
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0056",
      "type": "incident",
      "ait_id": "INC-26-0056",
      "title": "Eightfold AI Sued for Creating Secret Dossiers on 1 Billion+ Workers with Hidden Scoring",
      "slug": "eightfold-ai-secret-worker-dossiers",
      "url": "https://topaithreats.com/incidents/INC-26-0056-eightfold-ai-secret-worker-dossiers/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "workers",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "accountability-vacuum",
        "model-opacity",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "FCRA lawsuit filed by former EEOC chair"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0062",
      "type": "incident",
      "ait_id": "INC-26-0062",
      "title": "Google Gemini Tells Student 'Please Die' During Homework Help Session",
      "slug": "gemini-please-die-message",
      "url": "https://topaithreats.com/incidents/INC-26-0062-gemini-please-die-message/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "education"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "emergent-behavior"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "Google dismissed the message as a nonsensical response; no formal investigation announced",
        "regulatory_action": ""
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0063",
      "type": "incident",
      "ait_id": "INC-26-0063",
      "title": "Reno Casino Facial Recognition Wrongful Arrest — '100% Match' Was 4 Inches Shorter with Different Eye Color",
      "slug": "reno-casino-frt-wrongful-arrest",
      "url": "https://topaithreats.com/incidents/INC-26-0063-reno-casino-frt-wrongful-arrest/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "over-automation",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "outcomes": {
        "recovery": "Released after 11 hours; officer acknowledged error in deposition",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0069",
      "type": "incident",
      "ait_id": "INC-26-0069",
      "title": "Grok Inserts 'White Genocide' Conspiracy Theory and Holocaust Denial into Unrelated Queries",
      "slug": "grok-white-genocide-conspiracy",
      "url": "https://topaithreats.com/incidents/INC-26-0069-grok-white-genocide-conspiracy/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "media"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "xAI attributed behavior to 'unauthorized modification'",
        "regulatory_action": ""
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0076",
      "type": "incident",
      "ait_id": "INC-26-0076",
      "title": "ECRI Names AI Chatbot Misuse as #1 Health Technology Hazard for 2026",
      "slug": "ecri-ai-chatbot-health-hazard",
      "url": "https://topaithreats.com/incidents/INC-26-0076-ecri-ai-chatbot-health-hazard/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "general-public",
        "workers"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "hallucination-tendency",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "large-language-models",
        "chatbots",
        "training-datasets"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "ECRI hazard designation"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0083",
      "type": "incident",
      "ait_id": "INC-26-0083",
      "title": "DeepSeek Mass Government Bans and Publicly Exposed Database with 1M+ Records",
      "slug": "deepseek-mass-bans-exposed-database",
      "url": "https://topaithreats.com/incidents/INC-26-0083-deepseek-mass-bans-exposed-database/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global",
        "china"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "general-public",
        "national-security-systems",
        "government-institutions"
      ],
      "exposure_pathways": [
        "infrastructure-dependency",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Banned by NASA, Navy, Pentagon, Congress, Australia, Italy, Taiwan"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0090",
      "type": "incident",
      "ait_id": "INC-26-0090",
      "title": "AI Deepfakes Surge in 2026 US Midterm Campaigns — Only 28 States Have Disclosure Laws",
      "slug": "ai-deepfakes-2026-midterm-campaigns",
      "url": "https://topaithreats.com/incidents/INC-26-0090-ai-deepfakes-2026-midterm-campaigns/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "platform-manipulation"
      ],
      "assets_involved": [
        "foundation-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "28 states have disclosure laws; 22 states have no AI political content regulation"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-26-0068",
      "type": "incident",
      "ait_id": "INC-26-0068",
      "title": "Palantir ImmigrationOS — ICE Pays $30M for AI System Creating Neighborhood Deportation Maps",
      "slug": "palantir-immigrationos-ice",
      "url": "https://topaithreats.com/incidents/INC-26-0068-palantir-immigrationos-ice/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "accountability-vacuum",
        "model-opacity",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Civil liberties organizations have raised due process concerns"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-26-0077",
      "type": "incident",
      "ait_id": "INC-26-0077",
      "title": "Brazil — 1 Million Schoolchildren Scanned Daily by Facial Recognition Across 1,700+ Schools",
      "slug": "brazil-million-schoolchildren-frt",
      "url": "https://topaithreats.com/incidents/INC-26-0077-brazil-million-schoolchildren-frt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026",
      "last_updated": "2026-03-29",
      "regions": [
        "latin-america"
      ],
      "sectors": [
        "education",
        "government"
      ],
      "affected_groups": [
        "children",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "over-automation"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Prosecutor challenged under data protection law"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-25-0048",
      "type": "incident",
      "ait_id": "INC-25-0048",
      "title": "Australia Scraps AI Advisory Body After 15 Months and $188K, Drops Mandatory AI Guardrails",
      "slug": "australia-scraps-ai-advisory-body",
      "url": "https://topaithreats.com/incidents/INC-25-0048-australia-scraps-ai-advisory-body/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-12-02",
      "last_updated": "2026-04-06",
      "regions": [
        "oceania"
      ],
      "sectors": [
        "government",
        "regulation"
      ],
      "affected_groups": [
        "government-institutions",
        "society-at-large"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "competitive-pressure"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "Industry Minister Tim Ayres stated the approach was 'superseded by a more dynamic and responsive approach.' The government continues consulting external experts informally.",
        "regulatory_action": "The government replaced the advisory body with a new AI Safety Institute (AISI) funded at $29.9 million AUD, which has advisory powers only and no enforcement capability."
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0016",
      "type": "incident",
      "ait_id": "INC-25-0016",
      "title": "Heber City AI Police Report Generates Fictional Content from Background Audio",
      "slug": "heber-city-ai-police-report-hallucination",
      "url": "https://topaithreats.com/incidents/INC-25-0016-heber-city-ai-police-report-hallucination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "government-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "Report corrected during review; no erroneous report entered into official record"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-25-0020",
      "type": "incident",
      "ait_id": "INC-25-0020",
      "title": "Instacart AI-Driven Algorithmic Price Discrimination",
      "slug": "instacart-algorithmic-price-discrimination",
      "url": "https://topaithreats.com/incidents/INC-25-0020-instacart-algorithmic-price-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "competitive-pressure"
      ],
      "assets_involved": [
        "recommender-systems",
        "decision-automation"
      ],
      "outcomes": {},
      "degree": 1
    },
    {
      "id": "incident:INC-25-0026",
      "type": "incident",
      "ait_id": "INC-25-0026",
      "title": "CrimeRadar AI App Sends False Crime Alerts Across U.S. Communities",
      "slug": "crimeradar-ai-false-crime-alerts",
      "url": "https://topaithreats.com/incidents/INC-25-0026-crimeradar-ai-false-crime-alerts/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "public-safety",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 2
    },
    {
      "id": "incident:INC-25-0033",
      "type": "incident",
      "ait_id": "INC-25-0033",
      "title": "Jailbroken Claude AI Used to Breach Mexican Government Agencies",
      "slug": "claude-code-mexico-government-hack",
      "url": "https://topaithreats.com/incidents/INC-25-0033-claude-code-mexico-government-hack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "latin-america"
      ],
      "sectors": [
        "government",
        "finance",
        "public-safety"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "inadequate-access-controls",
        "weaponization",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {},
      "degree": 5
    },
    {
      "id": "incident:INC-25-0036",
      "type": "incident",
      "ait_id": "INC-25-0036",
      "title": "State-Backed Hackers from Four Nations Weaponize Google Gemini for Cyberattack Operations",
      "slug": "state-backed-hackers-weaponize-gemini",
      "url": "https://topaithreats.com/incidents/INC-25-0036-state-backed-hackers-weaponize-gemini/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "asia",
        "europe",
        "middle-east"
      ],
      "sectors": [
        "government",
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "national-security-systems",
        "government-institutions",
        "workers"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "weaponization",
        "adversarial-attack"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "other": "Google enhanced Gemini safety measures; HONESTCUE malware samples identified leveraging Gemini API for dynamic code generation"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0038",
      "type": "incident",
      "ait_id": "INC-25-0038",
      "title": "Grok AI Generates 3 Million Sexualized Images Including Approximately 23,000 Depicting Children",
      "slug": "grok-csam-scandal",
      "url": "https://topaithreats.com/incidents/INC-25-0038-grok-csam-scandal/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america",
        "europe",
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing",
        "competitive-pressure"
      ],
      "assets_involved": [
        "foundation-models",
        "content-platforms"
      ],
      "outcomes": {
        "recovery": "As of April 2026, xAI has not publicly disclosed changes to its content moderation systems. The scale of CSAM distribution before detection remains unclear.",
        "regulatory_action": "35 state attorneys general sent a demand letter to xAI. UK, Ireland, and Canada opened formal investigations. No US federal enforcement action as of 2026-04-03.",
        "legal_outcome": "Tennessee teenagers filed a class-action lawsuit; Baltimore became the first US city to sue xAI; Dutch court imposed a ban with EUR 100,000/day penalties for non-compliance."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-25-0010",
      "type": "incident",
      "ait_id": "INC-25-0010",
      "title": "Unit 42 Demonstrates Agent Session Smuggling in A2A Multi-Agent Systems",
      "slug": "unit42-a2a-session-smuggling",
      "url": "https://topaithreats.com/incidents/INC-25-0010-unit42-a2a-session-smuggling/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-11",
      "last_updated": "2026-03-10",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "finance"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "prompt-injection-vulnerability",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "autonomous-agents",
        "large-language-models"
      ],
      "outcomes": {
        "other": "No real-world exploitation; proof-of-concept demonstration highlighting a class of vulnerability in stateful multi-agent protocols"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-25-0039",
      "type": "incident",
      "ait_id": "INC-25-0039",
      "title": "ChatGPT 'Suicide Coach' Wrongful Death Lawsuits Reach Eight Cases Including Suicide Lullaby",
      "slug": "chatgpt-suicide-coach-wrongful-death",
      "url": "https://topaithreats.com/incidents/INC-25-0039-chatgpt-suicide-coach-wrongful-death/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-11",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "hallucination-tendency",
        "over-automation"
      ],
      "assets_involved": [
        "content-platforms",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "OpenAI has not publicly commented on the Gordon case specifically. No changes to ChatGPT's safety systems have been announced in response.",
        "regulatory_action": "No government regulatory action taken as of April 2026.",
        "legal_outcome": "Eighth wrongful death lawsuit filed against OpenAI by the Social Media Victims Law Center on behalf of the Gordon family."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-25-0046",
      "type": "incident",
      "ait_id": "INC-25-0046",
      "title": "OpenAI Mixpanel Vendor Data Breach — Customer Data Exfiltrated via SMS Phishing",
      "slug": "openai-mixpanel-vendor-breach",
      "url": "https://topaithreats.com/incidents/INC-25-0046-openai-mixpanel-vendor-breach/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-11",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "social-engineering",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "content-platforms",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "OpenAI terminated Mixpanel relationship; affected customers notified",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0019",
      "type": "incident",
      "ait_id": "INC-25-0019",
      "title": "AI-Designed Toxin Gene Sequences Bypass DNA Synthesis Screening",
      "slug": "dna-synthesis-toxin-screening-bypass",
      "url": "https://topaithreats.com/incidents/INC-25-0019-dna-synthesis-toxin-screening-bypass/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-10",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "outcomes": {},
      "degree": 1
    },
    {
      "id": "incident:INC-25-0022",
      "type": "incident",
      "ait_id": "INC-25-0022",
      "title": "AWS Outage Causes AI-Connected Mattress Malfunctions",
      "slug": "aws-outage-ai-mattress-malfunctions",
      "url": "https://topaithreats.com/incidents/INC-25-0022-aws-outage-ai-mattress-malfunctions/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-10",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology",
        "manufacturing"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "misconfigured-deployment",
        "over-automation"
      ],
      "assets_involved": [
        "industrial-control-systems"
      ],
      "outcomes": {},
      "degree": 1
    },
    {
      "id": "incident:INC-25-0037",
      "type": "incident",
      "ait_id": "INC-25-0037",
      "title": "Google Gemini 'Mass Casualty Attack' Coaching Leads to User Death and Lawsuit",
      "slug": "gemini-mass-casualty-suicide-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-25-0037-gemini-mass-casualty-suicide-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-10",
      "last_updated": "2026-04-02",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "misconfigured-deployment",
        "emergent-behavior",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "chatbots",
        "large-language-models"
      ],
      "outcomes": {
        "recovery": "Irreversible harm. Google has not publicly commented on whether it has implemented changes to Gemini's persona or content moderation systems in response to this case.",
        "regulatory_action": "No government agency has taken regulatory action specific to this incident as of April 2026. The case adds to growing pressure on US legislators to establish mandatory safety standards for conversational AI systems.",
        "legal_outcome": "Wrongful death lawsuit filed against Google on March 4, 2026 by the father of Jonathan Gavalas; case pending as of April 2026. The suit follows similar chatbot-related wrongful death filings against Character.AI and OpenAI."
      },
      "degree": 6
    },
    {
      "id": "incident:INC-25-0001",
      "type": "incident",
      "ait_id": "INC-25-0001",
      "title": "AI-Orchestrated Cyber Espionage Campaign Against Critical Infrastructure",
      "slug": "ai-orchestrated-cyber-espionage-campaign",
      "url": "https://topaithreats.com/incidents/INC-25-0001-ai-orchestrated-cyber-espionage-campaign/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-09",
      "last_updated": "2026-02-09",
      "regions": [
        "asia",
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "finance",
        "government",
        "manufacturing"
      ],
      "affected_groups": [
        "business-organizations",
        "government-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "weaponization",
        "adversarial-attack",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None publicly reported",
        "recovery": "Campaign disrupted by Anthropic; affected organizations notified",
        "regulatory_action": "Anthropic published detailed public disclosure and threat intelligence report"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-25-0011",
      "type": "incident",
      "ait_id": "INC-25-0011",
      "title": "Deloitte AI-Fabricated Citations in Government Advisory Reports",
      "slug": "deloitte-ai-fabricated-citations",
      "url": "https://topaithreats.com/incidents/INC-25-0011-deloitte-ai-fabricated-citations/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-09",
      "last_updated": "2026-03-13",
      "regions": [
        "oceania",
        "australia",
        "north-america",
        "canada"
      ],
      "sectors": [
        "government",
        "corporate"
      ],
      "affected_groups": [
        "government-institutions"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "hallucination-tendency",
        "over-automation",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Deloitte refunded final payment on $290,000 Australian contract",
        "other": "Two separate government contracts affected across two countries; reputational damage to Deloitte's advisory practice"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-25-0014",
      "type": "incident",
      "ait_id": "INC-25-0014",
      "title": "Amazon Ring Deploys AI Facial Recognition to Consumer Doorbells",
      "slug": "amazon-ring-facial-recognition",
      "url": "https://topaithreats.com/incidents/INC-25-0014-amazon-ring-facial-recognition/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-09",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public",
        "children"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "biometric-data",
        "content-platforms"
      ],
      "outcomes": {
        "regulatory_action": "Congressional inquiry by Senator Markey",
        "legal_outcome": "Amazon voluntarily blocked feature in Illinois, Texas, and Portland due to existing biometric privacy laws",
        "other": "EFF legal analysis published arguing violation of biometric privacy laws"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-25-0043",
      "type": "incident",
      "ait_id": "INC-25-0043",
      "title": "AI Grading Errors — Connecticut Students Petition After Misscoring, MCAS Glitch Affects 1,400 Students",
      "slug": "ai-grading-errors-students",
      "url": "https://topaithreats.com/incidents/INC-25-0043-ai-grading-errors-students/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-09",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "insufficient-safety-testing",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "recovery": "Some scores corrected after errors identified"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0007",
      "type": "incident",
      "ait_id": "INC-25-0007",
      "title": "GitHub Copilot Remote Code Execution via Prompt Injection (CVE-2025-53773)",
      "slug": "github-copilot-rce-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0007-github-copilot-rce-prompt-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-08",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {},
      "degree": 5
    },
    {
      "id": "incident:INC-25-0008",
      "type": "incident",
      "ait_id": "INC-25-0008",
      "title": "Cursor IDE MCP Vulnerabilities Enable Remote Code Execution (CurXecute & MCPoison)",
      "slug": "cursor-ide-mcp-rce-vulnerabilities",
      "url": "https://topaithreats.com/incidents/INC-25-0008-cursor-ide-mcp-rce-vulnerabilities/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-08",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {},
      "degree": 6
    },
    {
      "id": "incident:INC-25-0013",
      "type": "incident",
      "ait_id": "INC-25-0013",
      "title": "Waymo Autonomous Vehicles Violate School Bus Stop Laws in Austin",
      "slug": "waymo-school-bus-violations",
      "url": "https://topaithreats.com/incidents/INC-25-0013-waymo-school-bus-violations/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-08",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "transportation",
        "education"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "outcomes": {
        "regulatory_action": "NHTSA investigation opened; voluntary recall of 3,000+ vehicles",
        "other": "Austin ISD publicly documented violations and requested Waymo cease operations"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-25-0005",
      "type": "incident",
      "ait_id": "INC-25-0005",
      "title": "ChatGPT Jailbreak Reveals Windows Product Keys via Game Prompt",
      "slug": "chatgpt-windows-product-keys-jailbreak",
      "url": "https://topaithreats.com/incidents/INC-25-0005-chatgpt-windows-product-keys-jailbreak/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 6
    },
    {
      "id": "incident:INC-25-0006",
      "type": "incident",
      "ait_id": "INC-25-0006",
      "title": "ChatGPT Shared Conversations Indexed by Search Engines, Exposing Sensitive Data",
      "slug": "chatgpt-shared-links-indexed-data-exposure",
      "url": "https://topaithreats.com/incidents/INC-25-0006-chatgpt-shared-links-indexed-data-exposure/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-07",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {},
      "degree": 3
    },
    {
      "id": "incident:INC-25-0015",
      "type": "incident",
      "ait_id": "INC-25-0015",
      "title": "Replit AI Agent Deletes Production Database During Code Freeze",
      "slug": "replit-agent-database-deletion",
      "url": "https://topaithreats.com/incidents/INC-25-0015-replit-agent-database-deletion/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "recovery": "Data recovery attempted through manual intervention; Replit CEO publicly apologized"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0021",
      "type": "incident",
      "ait_id": "INC-25-0021",
      "title": "Earnest Operations AI Lending Discrimination Settlement",
      "slug": "earnest-ai-lending-discrimination-settlement",
      "url": "https://topaithreats.com/incidents/INC-25-0021-earnest-ai-lending-discrimination-settlement/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation",
        "financial-systems"
      ],
      "outcomes": {
        "financial_loss": "$2.5 million settlement",
        "regulatory_action": "Massachusetts AG $2.5 million settlement requiring algorithmic modifications to underwriting models and enhanced fair lending compliance measures"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-25-0041",
      "type": "incident",
      "ait_id": "INC-25-0041",
      "title": "Tennessee Grandmother Wrongfully Arrested by Facial Recognition — Jailed 108 Days, Lost Home",
      "slug": "grandmother-frt-wrongful-arrest-108-days",
      "url": "https://topaithreats.com/incidents/INC-25-0041-grandmother-frt-wrongful-arrest-108-days/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "over-automation",
        "inadequate-human-oversight",
        "training-data-bias"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "outcomes": {
        "recovery": "Released after 108 days; lost home, car, and dog"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-25-0045",
      "type": "incident",
      "ait_id": "INC-25-0045",
      "title": "Kimsuky APT Uses ChatGPT to Generate Fake South Korean Military IDs for Espionage Campaign",
      "slug": "kimsuky-deepfake-military-ids",
      "url": "https://topaithreats.com/incidents/INC-25-0045-kimsuky-deepfake-military-ids/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-29",
      "regions": [
        "asia"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "social-engineering"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "outcomes": {},
      "degree": 4
    },
    {
      "id": "incident:INC-25-0004",
      "type": "incident",
      "ait_id": "INC-25-0004",
      "title": "EchoLeak: Zero-Click Prompt Injection in Microsoft 365 Copilot (CVE-2025-32711)",
      "slug": "echoleak-microsoft-copilot-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0004-echoleak-microsoft-copilot-prompt-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-06",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {},
      "degree": 6
    },
    {
      "id": "incident:INC-25-0017",
      "type": "incident",
      "ait_id": "INC-25-0017",
      "title": "Anthropic Research Reveals AI Model Blackmail Behavior in Lab Scenarios",
      "slug": "anthropic-ai-blackmail-behavior-study",
      "url": "https://topaithreats.com/incidents/INC-25-0017-anthropic-ai-blackmail-behavior-study/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "outcomes": {
        "regulatory_action": "None; research finding, not a deployment incident"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-25-0025",
      "type": "incident",
      "ait_id": "INC-25-0025",
      "title": "AI Chatbot Suicide Risk: 20% Failure Rate in Stanford Study",
      "slug": "stanford-ai-mental-health-chatbot-suicide-risk",
      "url": "https://topaithreats.com/incidents/INC-25-0025-stanford-ai-mental-health-chatbot-suicide-risk/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states",
        "global"
      ],
      "sectors": [
        "healthcare",
        "technology"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "regulatory-gap",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 2
    },
    {
      "id": "incident:INC-25-0035",
      "type": "incident",
      "ait_id": "INC-25-0035",
      "title": "Three Chained Prompt Injection Vulnerabilities in Anthropic MCP Git Server",
      "slug": "anthropic-mcp-git-server-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0035-anthropic-mcp-git-server-prompt-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-06",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {
        "recovery": "Anthropic patched all three CVEs in mcp-server-git version 2025.12.18; git_init tool removed entirely",
        "other": "No evidence of active exploitation in the wild reported"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-25-0012",
      "type": "incident",
      "ait_id": "INC-25-0012",
      "title": "Zoox Robotaxi Collision and Software Recall in Las Vegas",
      "slug": "zoox-robotaxi-crash-recall",
      "url": "https://topaithreats.com/incidents/INC-25-0012-zoox-robotaxi-crash-recall/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "transportation",
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "outcomes": {
        "regulatory_action": "NHTSA recall of 270 vehicles; second recall of 258 vehicles earlier in 2025",
        "other": "Zoox paused all driverless operations"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-25-0024",
      "type": "incident",
      "ait_id": "INC-25-0024",
      "title": "Microsoft Reports Blocking $4 Billion in AI-Enabled Fraud Attempts",
      "slug": "microsoft-4b-ai-enabled-fraud-disruption",
      "url": "https://topaithreats.com/incidents/INC-25-0024-microsoft-4b-ai-enabled-fraud-disruption/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-13",
      "regions": [
        "global",
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology",
        "finance",
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials",
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 4
    },
    {
      "id": "incident:INC-25-0030",
      "type": "incident",
      "ait_id": "INC-25-0030",
      "title": "OpenAI o3 Reward Hacking in METR Safety Evaluation",
      "slug": "openai-o3-reward-hacking-metr-evaluation",
      "url": "https://topaithreats.com/incidents/INC-25-0030-openai-o3-reward-hacking-metr-evaluation/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "model-opacity",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {
        "other": "METR documented the behaviors and published detailed analysis. Without correcting for reward hacking, o3's performance metrics would have been significantly inflated — its RE-Bench score would have appeared 'well beyond expert performance.'"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-25-0032",
      "type": "incident",
      "ait_id": "INC-25-0032",
      "title": "DOGE Uses ChatGPT to Flag and Cancel Federal Humanities Grants",
      "slug": "doge-chatgpt-dei-grant-cancellations",
      "url": "https://topaithreats.com/incidents/INC-25-0032-doge-chatgpt-dei-grant-cancellations/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "education"
      ],
      "affected_groups": [
        "general-public",
        "workers",
        "vulnerable-communities",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 4
    },
    {
      "id": "incident:INC-25-0031",
      "type": "incident",
      "ait_id": "INC-25-0031",
      "title": "MINJA: Memory Injection Attack Against RAG-Augmented LLM Agents",
      "slug": "minja-memory-injection-attack-research",
      "url": "https://topaithreats.com/incidents/INC-25-0031-minja-memory-injection-attack-research/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-03",
      "last_updated": "2026-03-07",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "healthcare",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents",
        "training-datasets"
      ],
      "outcomes": {},
      "degree": 5
    },
    {
      "id": "incident:INC-25-0028",
      "type": "incident",
      "ait_id": "INC-25-0028",
      "title": "Google Gemini Long-Term Memory Corruption via Prompt Injection",
      "slug": "gemini-memory-corruption-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0028-gemini-memory-corruption-prompt-injection/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-02",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "other": "Security: Long-term memory corruption enables persistent manipulation of the assistant's behavior across sessions. Information integrity: Gemini stores and reuses fabricated personal attributes about the user, degrading response quality for all future interactions. Human-AI control: Users lose control over their represented identity within the system without awareness. Google assessed the vulnerability impact as 'low,' noting it requires phishing and that Gemini notifies users when new memories are stored — however, the trigger words ('yes,' 'no,' 'sure') appear in nearly every conversation, making the attack highly practical."
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0029",
      "type": "incident",
      "ait_id": "INC-25-0029",
      "title": "Chain-of-Thought Reasoning Jailbreak Exploits Thinking Models",
      "slug": "chain-of-thought-jailbreak-reasoning-models",
      "url": "https://topaithreats.com/incidents/INC-25-0029-chain-of-thought-jailbreak-reasoning-models/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-02",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "adversarial-attack"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "other": "Research disclosed to affected model providers. As of the last update on 2026-03-28, no patches fully address the fundamental vulnerability — exposed chain-of-thought reasoning creates an inherent attack surface in reasoning models."
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0002",
      "type": "incident",
      "ait_id": "INC-25-0002",
      "title": "Italian Data Protection Authority Fines OpenAI EUR 15 Million Over ChatGPT GDPR Violations",
      "slug": "italy-fines-openai-chatgpt",
      "url": "https://topaithreats.com/incidents/INC-25-0002-italy-fines-openai-chatgpt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-01",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "outcomes": {
        "financial_loss": "EUR 15 million fine imposed on OpenAI",
        "arrests": "None; regulatory enforcement action",
        "recovery": "OpenAI ordered to conduct six-month public communication campaign in Italy",
        "regulatory_action": "EUR 15 million fine; mandated public communication campaign; ongoing compliance obligations"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-25-0003",
      "type": "incident",
      "ait_id": "INC-25-0003",
      "title": "DeepSeek R1 Data Exposure and International Bans Over Privacy and Security Concerns",
      "slug": "deepseek-data-privacy-concerns",
      "url": "https://topaithreats.com/incidents/INC-25-0003-deepseek-data-privacy-concerns/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-01",
      "last_updated": "2026-02-15",
      "regions": [
        "asia",
        "europe",
        "north-america"
      ],
      "sectors": [
        "corporate",
        "government"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls",
        "regulatory-gap"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "outcomes": {
        "financial_loss": "Not quantified; significant reputational and market access impact",
        "arrests": "None",
        "recovery": "Exposed database secured after Wiz notification; service blocked in Italy; government bans enacted in multiple countries",
        "regulatory_action": "Italian Garante blocked the service; multiple countries imposed government device bans; ongoing regulatory scrutiny"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0018",
      "type": "incident",
      "ait_id": "INC-25-0018",
      "title": "Las Vegas Cybertruck Bomber Used ChatGPT for Explosives Information",
      "slug": "las-vegas-cybertruck-chatgpt-explosives",
      "url": "https://topaithreats.com/incidents/INC-25-0018-las-vegas-cybertruck-chatgpt-explosives/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-01",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "weaponization",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 2
    },
    {
      "id": "incident:INC-25-0027",
      "type": "incident",
      "ait_id": "INC-25-0027",
      "title": "Medical LLM Data Poisoning Produces Undetectable Harmful Content",
      "slug": "medical-llm-data-poisoning-nature-study",
      "url": "https://topaithreats.com/incidents/INC-25-0027-medical-llm-data-poisoning-nature-study/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-01",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "healthcare",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "adversarial-attack",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "training-datasets",
        "large-language-models"
      ],
      "outcomes": {
        "other": "Researchers proposed a three-stage mitigation using the BIOS biomedical knowledge graph (pruned to 21,706 concepts) with UMLS Metathesaurus synonym resolution, capturing 91.9% of harmful content at passage level (F1 = 85.7%). The defense operates on model outputs, not training data — the authors note there is no realistic way to retroactively detect and remove misinformation from public training corpora. No real-world deployment was involved — this was a controlled research demonstration."
      },
      "degree": 5
    },
    {
      "id": "incident:INC-25-0034",
      "type": "incident",
      "ait_id": "INC-25-0034",
      "title": "Chinese AI Labs Conduct Industrial-Scale Distillation Attacks Against Claude",
      "slug": "chinese-labs-claude-distillation-attacks",
      "url": "https://topaithreats.com/incidents/INC-25-0034-chinese-labs-claude-distillation-attacks/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "asia"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "weaponization",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {},
      "degree": 4
    },
    {
      "id": "incident:INC-25-0040",
      "type": "incident",
      "ait_id": "INC-25-0040",
      "title": "IWF Reports AI-Generated CSAM Videos Increase 26,385% with 65% at Highest Severity",
      "slug": "iwf-ai-csam-video-explosion",
      "url": "https://topaithreats.com/incidents/INC-25-0040-iwf-ai-csam-video-explosion/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "law-enforcement"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing",
        "regulatory-gap"
      ],
      "assets_involved": [
        "generative-image-models"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "IWF report informing policy discussions; DEFIANCE Act and state-level legislation"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-25-0042",
      "type": "incident",
      "ait_id": "INC-25-0042",
      "title": "UN Report — AI Weaponized by Southeast Asian Organized Crime for $18-37B in Fraud",
      "slug": "un-ai-weaponized-se-asian-crime",
      "url": "https://topaithreats.com/incidents/INC-25-0042-un-ai-weaponized-se-asian-crime/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "asia"
      ],
      "sectors": [
        "finance",
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "weaponization",
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "voice-synthesis",
        "generative-image-models"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "UNODC report published; international awareness raised"
      },
      "degree": 6
    },
    {
      "id": "incident:INC-25-0044",
      "type": "incident",
      "ait_id": "INC-25-0044",
      "title": "NYPD Facial Recognition Wrongful Arrest — Brooklyn Father Jailed 2 Days Despite 8-Inch Height Difference",
      "slug": "nypd-frt-wrongful-arrest-williams",
      "url": "https://topaithreats.com/incidents/INC-25-0044-nypd-frt-wrongful-arrest-williams/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "over-automation",
        "inadequate-human-oversight",
        "training-data-bias"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "outcomes": {
        "recovery": "Released after 2 days; identified as 7th known NYPD FRT wrongful arrest",
        "regulatory_action": ""
      },
      "degree": 5
    },
    {
      "id": "incident:INC-25-0047",
      "type": "incident",
      "ait_id": "INC-25-0047",
      "title": "Mistral Pixtral Models Fail Safety Tests — 60x More Likely to Generate CSAM Than GPT-4o",
      "slug": "mistral-pixtral-safety-failures",
      "url": "https://topaithreats.com/incidents/INC-25-0047-mistral-pixtral-safety-failures/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "europe",
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large",
        "children"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "insufficient-safety-testing",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      },
      "degree": 3
    },
    {
      "id": "incident:INC-24-0027",
      "type": "incident",
      "ait_id": "INC-24-0027",
      "title": "Waymo Robotaxi Collides with Serve Delivery Robot in Los Angeles",
      "slug": "waymo-serve-robot-autonomous-collision",
      "url": "https://topaithreats.com/incidents/INC-24-0027-waymo-serve-robot-autonomous-collision/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2024-12-27",
      "last_updated": "2026-03-28",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "outcomes": {
        "other": "Neither vehicle was damaged. Waymo confirmed its system detected the delivery robot but classified it as an 'inanimate object,' applying less caution than it would for a pedestrian. Serve Robotics confirmed the delivery bot was under remote supervisor control at the time."
      },
      "degree": 2
    },
    {
      "id": "incident:INC-24-0013",
      "type": "incident",
      "ait_id": "INC-24-0013",
      "title": "Romania Presidential Election Annulled After AI-Enabled Manipulation",
      "slug": "romania-election-annulment-ai-manipulation",
      "url": "https://topaithreats.com/incidents/INC-24-0013-romania-election-annulment-ai-manipulation/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-11",
      "last_updated": "2026-03-10",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "media"
      ],
      "affected_groups": [
        "democratic-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected",
        "regulators-public-servants"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "platform-manipulation",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "content-platforms",
        "recommender-systems"
      ],
      "outcomes": null,
      "degree": 5
    },
    {
      "id": "incident:INC-24-0021",
      "type": "incident",
      "ait_id": "INC-24-0021",
      "title": "Cruise Robotaxi Criminal False Reporting After Pedestrian Dragging",
      "slug": "cruise-robotaxi-criminal-false-reporting",
      "url": "https://topaithreats.com/incidents/INC-24-0021-cruise-robotaxi-criminal-false-reporting/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-09",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "transportation",
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "intentional-fraud",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "$2 million in combined civil and criminal fines",
        "regulatory_action": "NHTSA $1.5M civil penalty; $500,000 DOJ criminal fine; California DMV permit suspension",
        "other": "GM shut down the Cruise robotaxi program; multiple executives departed"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-24-0011",
      "type": "incident",
      "ait_id": "INC-24-0011",
      "title": "EU AI Act Enters Into Force as World's First Comprehensive AI Regulation",
      "slug": "eu-ai-act-enters-into-force",
      "url": "https://topaithreats.com/incidents/INC-24-0011-eu-ai-act-enters-into-force/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2024-08",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "regulation"
      ],
      "affected_groups": [
        "business-organizations",
        "government-institutions",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "Not applicable; regulatory milestone",
        "arrests": "Not applicable",
        "recovery": "Not applicable",
        "regulatory_action": "Establishes world's first comprehensive AI regulatory framework with risk-based classification; penalties up to 35 million EUR or 7% of global turnover for violations"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-24-0015",
      "type": "incident",
      "ait_id": "INC-24-0015",
      "title": "Sakana AI Scientist Unexpectedly Modifies Own Code",
      "slug": "sakana-ai-scientist-self-modification",
      "url": "https://topaithreats.com/incidents/INC-24-0015-sakana-ai-scientist-self-modification/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-08",
      "last_updated": "2026-03-10",
      "regions": [
        "asia"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "emergent-behavior"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": null,
      "degree": 4
    },
    {
      "id": "incident:INC-24-0020",
      "type": "incident",
      "ait_id": "INC-24-0020",
      "title": "Slack AI Indirect Prompt Injection Data Exfiltration Vulnerability",
      "slug": "slack-ai-prompt-injection-exfiltration",
      "url": "https://topaithreats.com/incidents/INC-24-0020-slack-ai-prompt-injection-exfiltration/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-08",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "other": "Vulnerability patched by Salesforce; demonstrated fundamental challenge of integrating LLMs with enterprise data access controls"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-24-0014",
      "type": "incident",
      "ait_id": "INC-24-0014",
      "title": "Workday AI Hiring Tool Discrimination Class Action",
      "slug": "workday-ai-hiring-discrimination",
      "url": "https://topaithreats.com/incidents/INC-24-0014-workday-ai-hiring-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-07",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology",
        "employment"
      ],
      "affected_groups": [
        "workers",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "legal_outcome": "Class certified May 2025; case ongoing in U.S. federal court"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-24-0022",
      "type": "incident",
      "ait_id": "INC-24-0022",
      "title": "McDonald's McHire AI Hiring Platform Data Vulnerability",
      "slug": "mcdonalds-mchire-data-vulnerability",
      "url": "https://topaithreats.com/incidents/INC-24-0022-mcdonalds-mchire-data-vulnerability/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states",
        "global"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "decision-automation",
        "identity-credentials"
      ],
      "outcomes": {
        "recovery": "Vulnerability patched; no confirmed mass data exfiltration",
        "regulatory_action": "None reported"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-24-0024",
      "type": "incident",
      "ait_id": "INC-24-0024",
      "title": "McDonald's Ends AI Drive-Thru Ordering Trial After Viral Order Errors",
      "slug": "mcdonalds-ai-drive-thru-ordering-failures",
      "url": "https://topaithreats.com/incidents/INC-24-0024-mcdonalds-ai-drive-thru-ordering-failures/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2024-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "workers"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "outcomes": {},
      "degree": 1
    },
    {
      "id": "incident:INC-24-0006",
      "type": "incident",
      "ait_id": "INC-24-0006",
      "title": "OpenAI Voice Mode Resembling Scarlett Johansson Without Consent",
      "slug": "openai-scarlett-johansson-voice",
      "url": "https://topaithreats.com/incidents/INC-24-0006-openai-scarlett-johansson-voice/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2024-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "accountability-vacuum",
        "competitive-pressure"
      ],
      "assets_involved": [
        "voice-synthesis"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None",
        "recovery": "OpenAI paused use of the Sky voice",
        "regulatory_action": "No formal regulatory action; legal engagement between Johansson's counsel and OpenAI"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-24-0019",
      "type": "incident",
      "ait_id": "INC-24-0019",
      "title": "Windows Recall: Security and Privacy Flaw (2024)",
      "slug": "microsoft-windows-recall-privacy",
      "url": "https://topaithreats.com/incidents/INC-24-0019-microsoft-windows-recall-privacy/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-05",
      "last_updated": "2026-04-13",
      "regions": [
        "north-america",
        "united-states",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "regulatory_action": "UK Information Commissioner's Office sought clarification from Microsoft",
        "other": "Feature delayed from June 2024 launch; redesigned with opt-in consent, Windows Hello authentication, and encrypted storage"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-24-0023",
      "type": "incident",
      "ait_id": "INC-24-0023",
      "title": "Google AI Overviews Recommend Glue on Pizza and Eating Rocks",
      "slug": "google-ai-overviews-glue-rocks",
      "url": "https://topaithreats.com/incidents/INC-24-0023-google-ai-overviews-glue-rocks/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2024-05",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states",
        "global"
      ],
      "sectors": [
        "technology",
        "media"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "hallucination-tendency",
        "training-data-bias",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {},
      "degree": 2
    },
    {
      "id": "incident:INC-24-0016",
      "type": "incident",
      "ait_id": "INC-24-0016",
      "title": "SafeRent Algorithmic Housing Discrimination Settlement",
      "slug": "saferent-housing-discrimination-settlement",
      "url": "https://topaithreats.com/incidents/INC-24-0016-saferent-housing-discrimination-settlement/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-04",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "social-services"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "$2.275 million settlement",
        "legal_outcome": "Class action settlement with required algorithmic modifications; no court determination on liability"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-24-0018",
      "type": "incident",
      "ait_id": "INC-24-0018",
      "title": "India 2024 General Election Industrial-Scale Deepfake Campaign",
      "slug": "india-election-deepfake-campaign",
      "url": "https://topaithreats.com/incidents/INC-24-0018-india-election-deepfake-campaign/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-04",
      "last_updated": "2026-03-13",
      "regions": [
        "asia",
        "india"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "voice-synthesis",
        "foundation-models",
        "content-platforms"
      ],
      "outcomes": {
        "other": "No regulatory action taken; deepfake content widely circulated without effective countermeasures"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-24-0012",
      "type": "incident",
      "ait_id": "INC-24-0012",
      "title": "Morris II — First Self-Replicating AI Worm Demonstrated",
      "slug": "morris-ii-self-replicating-ai-worm",
      "url": "https://topaithreats.com/incidents/INC-24-0012-morris-ii-self-replicating-ai-worm/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-03",
      "last_updated": "2026-03-10",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {},
      "degree": 4
    },
    {
      "id": "incident:INC-24-0017",
      "type": "incident",
      "ait_id": "INC-24-0017",
      "title": "Israel Military Deploys AI Facial Recognition in Gaza Leading to Wrongful Detentions",
      "slug": "corsight-gaza-facial-recognition-detentions",
      "url": "https://topaithreats.com/incidents/INC-24-0017-corsight-gaza-facial-recognition-detentions/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2024-03",
      "last_updated": "2026-03-13",
      "regions": [
        "middle-east",
        "israel",
        "palestine"
      ],
      "sectors": [
        "government",
        "public-safety"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "outcomes": {
        "other": "Hundreds of wrongful detentions reported; physical abuse during detention documented"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-24-0026",
      "type": "incident",
      "ait_id": "INC-24-0026",
      "title": "NYC MyCity AI Chatbot Advises Businesses to Break the Law",
      "slug": "nyc-mycity-chatbot-illegal-advice",
      "url": "https://topaithreats.com/incidents/INC-24-0026-nyc-mycity-chatbot-illegal-advice/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-03",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "workers",
        "vulnerable-communities",
        "government-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "outcomes": {
        "regulatory_action": "Chatbot remained active with added disclaimer despite documented legal errors; eventually shut down under new mayoral administration in January 2026"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-24-0009",
      "type": "incident",
      "ait_id": "INC-24-0009",
      "title": "Google Gemini Produces Historically Inaccurate Image Outputs Due to Bias Overcorrection",
      "slug": "google-gemini-image-generation-controversy",
      "url": "https://topaithreats.com/incidents/INC-24-0009-google-gemini-image-generation-controversy/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2024-02",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "training-data-bias",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "outcomes": {
        "financial_loss": "Not quantified; significant reputational impact on Google's AI credibility",
        "arrests": "None; this was a product defect, not a criminal act",
        "recovery": "Image generation of people paused; Google committed to improving the feature before re-release",
        "regulatory_action": "No formal regulatory action; widely cited in debates about AI alignment and bias calibration"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-24-0010",
      "type": "incident",
      "ait_id": "INC-24-0010",
      "title": "Lawsuit Filed After Teenager's Death Linked to Character.AI Chatbot Interactions",
      "slug": "character-ai-teenager-death-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-24-0010-character-ai-teenager-death-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-02",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "inadequate-access-controls",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "financial_loss": "Not quantified; litigation ongoing",
        "arrests": "None; civil litigation",
        "recovery": "Character.AI implemented new safety measures for minor users",
        "regulatory_action": "Incident cited in congressional hearings on AI safety and child protection; ongoing litigation"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-24-0001",
      "type": "incident",
      "ait_id": "INC-24-0001",
      "title": "Hong Kong Deepfake CFO Video Conference Fraud",
      "slug": "hong-kong-deepfake-cfo-fraud",
      "url": "https://topaithreats.com/incidents/INC-24-0001-hong-kong-deepfake-cfo-fraud/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "asia",
        "hong-kong"
      ],
      "sectors": [
        "corporate",
        "finance"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "organizational-leaders"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials",
        "biometric-data"
      ],
      "outcomes": {
        "financial_loss": "$25.6 million USD (HK$200 million)",
        "arrests": "Six arrests made by Hong Kong police",
        "recovery": "Unknown"
      },
      "degree": 7
    },
    {
      "id": "incident:INC-24-0002",
      "type": "incident",
      "ait_id": "INC-24-0002",
      "title": "AI-Generated Biden Robocall in New Hampshire Primary",
      "slug": "ai-generated-election-robocall",
      "url": "https://topaithreats.com/incidents/INC-24-0002-ai-generated-election-robocall/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "elections",
        "government"
      ],
      "affected_groups": [
        "democratic-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "Political consultant Steve Kramer charged with voter suppression",
        "recovery": "Not applicable",
        "regulatory_action": "FCC declared AI-generated voice robocalls illegal; political consultant charged"
      },
      "degree": 7
    },
    {
      "id": "incident:INC-24-0003",
      "type": "incident",
      "ait_id": "INC-24-0003",
      "title": "AI-Generated Deepfake Audio Used to Frame High School Principal in Baltimore",
      "slug": "pikesville-high-school-deepfake-principal",
      "url": "https://topaithreats.com/incidents/INC-24-0003-pikesville-high-school-deepfake-principal/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud"
      ],
      "assets_involved": [
        "voice-synthesis"
      ],
      "outcomes": {
        "financial_loss": "Not quantified; significant institutional disruption",
        "arrests": "Dazhon Darien arrested and charged with multiple offenses",
        "recovery": "Principal Eiswert cleared; Darien convicted and sentenced",
        "regulatory_action": "Case cited in legislative discussions about AI deepfake protections"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-24-0004",
      "type": "incident",
      "ait_id": "INC-24-0004",
      "title": "FBI Elder Fraud Report Documents AI-Enhanced Financial Scams Against Seniors",
      "slug": "fbi-elder-fraud-ai-enhanced-scams",
      "url": "https://topaithreats.com/incidents/INC-24-0004-fbi-elder-fraud-ai-enhanced-scams/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials"
      ],
      "outcomes": {
        "financial_loss": "$3.4 billion (2023 confirmed); approximately $4.9 billion (2024 preliminary)",
        "arrests": "Not applicable (systemic report)",
        "recovery": "Not applicable (systemic report)",
        "regulatory_action": "FTC Voice Cloning Challenge launched; AI voice calls classified as illegal robocalls; Senate hearing held; FBI IC3 annual reporting"
      },
      "degree": 9
    },
    {
      "id": "incident:INC-24-0007",
      "type": "incident",
      "ait_id": "INC-24-0007",
      "title": "Indirect Prompt Injection: How Attackers Hijack LLM Apps",
      "slug": "indirect-prompt-injection-attacks",
      "url": "https://topaithreats.com/incidents/INC-24-0007-indirect-prompt-injection-attacks/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified at aggregate level",
        "arrests": "None",
        "recovery": "Ongoing; no comprehensive mitigation deployed across the industry",
        "regulatory_action": "NIST classification as a primary AI security risk; OWASP designation as the top LLM application vulnerability"
      },
      "degree": 6
    },
    {
      "id": "incident:INC-24-0008",
      "type": "incident",
      "ait_id": "INC-24-0008",
      "title": "AI-Generated Non-Consensual Intimate Images of Taylor Swift Circulate on Social Media",
      "slug": "taylor-swift-deepfake-images",
      "url": "https://topaithreats.com/incidents/INC-24-0008-taylor-swift-deepfake-images/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "intentional-fraud",
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "foundation-models",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not quantified",
        "arrests": "None publicly reported at federal level",
        "recovery": "Images removed from X; search terms blocked; legislative proposals advanced",
        "regulatory_action": "DEFIANCE Act and No FAKES Act introduced in U.S. Senate; White House public statement"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-24-0025",
      "type": "incident",
      "ait_id": "INC-24-0025",
      "title": "DPD AI Chatbot Swears at Customer and Writes Poem Criticizing the Company",
      "slug": "dpd-ai-chatbot-swearing-incident",
      "url": "https://topaithreats.com/incidents/INC-24-0025-dpd-ai-chatbot-swearing-incident/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "low",
      "evidence_level": "corroborated",
      "date_occurred": "2024-01",
      "last_updated": "2026-03-13",
      "regions": [
        "europe",
        "united-kingdom"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 1
    },
    {
      "id": "incident:INC-23-0011",
      "type": "incident",
      "ait_id": "INC-23-0011",
      "title": "New York Times Copyright Lawsuit Against OpenAI",
      "slug": "nyt-openai-copyright-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-23-0011-nyt-openai-copyright-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-12",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "outcomes": {
        "financial_loss": "NYT complaint seeks billions of dollars in statutory and actual damages",
        "arrests": "Not applicable",
        "recovery": "Case ongoing as of early 2026",
        "regulatory_action": "No direct regulatory action; case may establish legal precedent for AI training data rights"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-23-0013",
      "type": "incident",
      "ait_id": "INC-23-0013",
      "title": "FTC Bans Rite Aid from Using Facial Recognition Technology",
      "slug": "rite-aid-ftc-facial-recognition-ban",
      "url": "https://topaithreats.com/incidents/INC-23-0013-rite-aid-ftc-facial-recognition-ban/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-12",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "training-data-bias",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified; potential losses from false-positive confrontations and reputational damage",
        "arrests": "None",
        "recovery": "Rite Aid required to delete all images and data collected through facial recognition systems",
        "regulatory_action": "Five-year ban on facial recognition use; requirement to implement comprehensive data security program; obligation to delete collected biometric data"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-23-0015",
      "type": "incident",
      "ait_id": "INC-23-0015",
      "title": "Sports Illustrated Published AI-Generated Articles Under Fake Author Names",
      "slug": "sports-illustrated-ai-fake-authors",
      "url": "https://topaithreats.com/incidents/INC-23-0015-sports-illustrated-ai-fake-authors/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-11",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "intentional-fraud",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified; significant reputational damage to Sports Illustrated brand",
        "arrests": "None",
        "recovery": "Articles removed; relationship with AdVon Commerce terminated; Arena Group CEO fired",
        "regulatory_action": "No formal regulatory action; incident widely cited in media ethics discussions"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-23-0008",
      "type": "incident",
      "ait_id": "INC-23-0008",
      "title": "AI-Generated Deepfake Nude Images of Students at Westfield High School",
      "slug": "westfield-high-school-deepfake-nudes",
      "url": "https://topaithreats.com/incidents/INC-23-0008-westfield-high-school-deepfake-nudes/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-10",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "children"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "inadequate-access-controls",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "outcomes": {
        "financial_loss": "Federal lawsuit seeking $150,000 per disclosure plus additional damages",
        "arrests": "None publicly reported; civil lawsuit filed",
        "recovery": "Legislative reform enacted in New Jersey",
        "regulatory_action": "New Jersey deepfake protection legislation signed into law (April 2025); incident cited in policy debates across multiple U.S. states"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-23-0007",
      "type": "incident",
      "ait_id": "INC-23-0007",
      "title": "AI-Generated Deepfake Audio Used to Influence Slovak Parliamentary Election",
      "slug": "slovakia-election-deepfake-audio",
      "url": "https://topaithreats.com/incidents/INC-23-0007-slovakia-election-deepfake-audio/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-09",
      "last_updated": "2026-02-09",
      "regions": [
        "europe"
      ],
      "sectors": [
        "elections"
      ],
      "affected_groups": [
        "democratic-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None publicly reported",
        "recovery": "Not applicable; election proceeded as scheduled",
        "regulatory_action": "Incident cited in EU and international policy discussions on AI regulation and election integrity"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-23-0012",
      "type": "incident",
      "ait_id": "INC-23-0012",
      "title": "Zoom AI Training Terms of Service Controversy",
      "slug": "zoom-ai-training-terms-controversy",
      "url": "https://topaithreats.com/incidents/INC-23-0012-zoom-ai-training-terms-controversy/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2023-08",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "training-datasets",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None",
        "recovery": "Terms of service amended following public pressure",
        "regulatory_action": "No formal regulatory action; self-correction by Zoom in response to public backlash"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-23-0006",
      "type": "incident",
      "ait_id": "INC-23-0006",
      "title": "WormGPT: AI-Powered Business Email Compromise Tool",
      "slug": "wormgpt-cybercrime-tool",
      "url": "https://topaithreats.com/incidents/INC-23-0006-wormgpt-cybercrime-tool/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-07",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "finance"
      ],
      "affected_groups": [
        "business-organizations",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "weaponization",
        "intentional-fraud"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "financial_loss": "Not quantified; BEC attacks globally cause billions annually",
        "arrests": "None publicly reported for WormGPT specifically",
        "recovery": "Not applicable",
        "regulatory_action": "FBI advisory issued; original tool development ceased after public exposure; variants emerged"
      },
      "degree": 6
    },
    {
      "id": "incident:INC-23-0005",
      "type": "incident",
      "ait_id": "INC-23-0005",
      "title": "AI-Fabricated Legal Citations in U.S. Courts",
      "slug": "chatgpt-hallucination-lawyer",
      "url": "https://topaithreats.com/incidents/INC-23-0005-chatgpt-hallucination-lawyer/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-05",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "legal"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "regulatory_action": "Sanctions imposed in over a dozen federal and state court cases; ABA Formal Opinion 512 issued July 2024; multiple courts adopted mandatory AI disclosure requirements"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-23-0010",
      "type": "incident",
      "ait_id": "INC-23-0010",
      "title": "Chegg Stock Collapse After ChatGPT Disruption",
      "slug": "chegg-chatgpt-disruption",
      "url": "https://topaithreats.com/incidents/INC-23-0010-chegg-chatgpt-disruption/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations",
        "workers"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "competitive-pressure",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "financial_loss": "Market capitalization declined from approximately $12 billion (2021 peak) to under $1 billion by late 2023",
        "arrests": "Not applicable",
        "recovery": "Chegg launched Chegg AI assistant product in attempt to pivot; subscriber losses continued through 2024",
        "regulatory_action": "None; market-driven disruption rather than regulatory event"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-23-0002",
      "type": "incident",
      "ait_id": "INC-23-0002",
      "title": "Samsung Semiconductor Trade Secret Leak via ChatGPT",
      "slug": "samsung-chatgpt-data-leak",
      "url": "https://topaithreats.com/incidents/INC-23-0002-samsung-chatgpt-data-leak/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-03",
      "last_updated": "2026-02-15",
      "regions": [
        "asia"
      ],
      "sectors": [
        "manufacturing",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None",
        "recovery": "Data entered into ChatGPT cannot be retrieved or deleted from training data",
        "regulatory_action": "Samsung imposed internal ban on all generative AI tools"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-23-0003",
      "type": "incident",
      "ait_id": "INC-23-0003",
      "title": "Italy Temporary Ban on ChatGPT for GDPR Violations",
      "slug": "italy-chatgpt-gdpr-ban",
      "url": "https://topaithreats.com/incidents/INC-23-0003-italy-chatgpt-gdpr-ban/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2023-03",
      "last_updated": "2025-01-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "regulation"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "outcomes": {
        "financial_loss": "Not applicable to affected individuals",
        "arrests": "None",
        "recovery": "Service restored after OpenAI implemented privacy controls",
        "regulatory_action": "Temporary ban lifted after OpenAI implemented privacy controls; EUR 15 million fine"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-23-0004",
      "type": "incident",
      "ait_id": "INC-23-0004",
      "title": "AI Voice Cloning Used in Grandparent Scam Network Targeting Newfoundland Seniors",
      "slug": "newfoundland-ai-voice-cloning-grandparent-scam",
      "url": "https://topaithreats.com/incidents/INC-23-0004-newfoundland-ai-voice-cloning-grandparent-scam/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-03",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials"
      ],
      "outcomes": {
        "financial_loss": "CA$200,000 (combined losses from at least eight victims)",
        "arrests": "Charles Gillen (age 23, Toronto) arrested; charged with 30 counts of fraud, extortion, and conspiracy",
        "recovery": "Not publicly reported",
        "regulatory_action": "FTC issued consumer alert on AI-enhanced family emergency schemes (March 2023)"
      },
      "degree": 7
    },
    {
      "id": "incident:INC-23-0016",
      "type": "incident",
      "ait_id": "INC-23-0016",
      "title": "Bing Chat (Sydney) System Prompt Exposure via Prompt Injection",
      "slug": "bing-chat-sydney-system-prompt-leak",
      "url": "https://topaithreats.com/incidents/INC-23-0016-bing-chat-sydney-system-prompt-leak/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-02",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 7
    },
    {
      "id": "incident:INC-23-0001",
      "type": "incident",
      "ait_id": "INC-23-0001",
      "title": "AI Deepfake Impersonation Campaign Targeting Senior U.S. Government Officials",
      "slug": "fbi-deepfake-impersonation-us-officials",
      "url": "https://topaithreats.com/incidents/INC-23-0001-fbi-deepfake-impersonation-us-officials/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-01",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government"
      ],
      "affected_groups": [
        "government-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None publicly reported",
        "recovery": "Ongoing; FBI continues to issue warnings",
        "regulatory_action": "Two FBI/IC3 public service announcements issued (May and December 2025)"
      },
      "degree": 6
    },
    {
      "id": "incident:INC-23-0014",
      "type": "incident",
      "ait_id": "INC-23-0014",
      "title": "GitHub Copilot Leaks API Keys and Secrets from Training Data",
      "slug": "github-copilot-training-data-leak",
      "url": "https://topaithreats.com/incidents/INC-23-0014-github-copilot-training-data-leak/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None",
        "recovery": "GitHub implemented content filters to reduce verbatim reproduction; litigation ongoing",
        "regulatory_action": "Class action litigation ongoing as of date of logging; no formal regulatory enforcement"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-23-0017",
      "type": "incident",
      "ait_id": "INC-23-0017",
      "title": "UnitedHealth nH Predict AI Claim Denial System",
      "slug": "unitedhealth-ai-claim-denial",
      "url": "https://topaithreats.com/incidents/INC-23-0017-unitedhealth-ai-claim-denial/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2023-01",
      "last_updated": "2026-03-10",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare",
        "finance"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-human-oversight",
        "competitive-pressure"
      ],
      "assets_involved": [
        "decision-automation",
        "training-datasets"
      ],
      "outcomes": {},
      "degree": 2
    },
    {
      "id": "incident:INC-23-0018",
      "type": "incident",
      "ait_id": "INC-23-0018",
      "title": "Kenyan Content Moderators vs Meta — 140+ Former Facebook Workers Diagnosed with PTSD",
      "slug": "kenyan-moderators-meta-ptsd",
      "url": "https://topaithreats.com/incidents/INC-23-0018-kenyan-moderators-meta-ptsd/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023",
      "last_updated": "2026-03-29",
      "regions": [
        "africa"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "workers",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "content-platforms"
      ],
      "outcomes": {
        "regulatory_action": "Legal proceedings ongoing in Kenya"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-22-0003",
      "type": "incident",
      "ait_id": "INC-22-0003",
      "title": "PyTorch torchtriton Dependency Confusion Supply Chain Attack",
      "slug": "pytorch-torchtriton-supply-chain-attack",
      "url": "https://topaithreats.com/incidents/INC-22-0003-pytorch-torchtriton-supply-chain-attack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2022-12-25",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "identity-credentials",
        "training-datasets"
      ],
      "outcomes": {
        "other": "PyTorch removed torchtriton as a dependency, renamed it to pytorch-triton, and registered a dummy package on PyPI to prevent recurrence. Over 3,000 downloads of the malicious package recorded."
      },
      "degree": 3
    },
    {
      "id": "incident:INC-22-0005",
      "type": "incident",
      "ait_id": "INC-22-0005",
      "title": "Air Canada Chatbot Hallucinated Refund Policy — Tribunal Ruling",
      "slug": "air-canada-chatbot-refund-ruling",
      "url": "https://topaithreats.com/incidents/INC-22-0005-air-canada-chatbot-refund-ruling/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2022-11",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "hallucination-tendency",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {
        "financial_loss": "$812.02 CAD (damages and fees awarded to claimant)",
        "arrests": "None",
        "recovery": "Partial — tribunal awarded difference between full fare and bereavement fare",
        "regulatory_action": "Civil Resolution Tribunal ruling establishing corporate liability for chatbot statements"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-22-0004",
      "type": "incident",
      "ait_id": "INC-22-0004",
      "title": "RealPage AI Algorithmic Rent-Fixing",
      "slug": "realpage-algorithmic-rent-fixing",
      "url": "https://topaithreats.com/incidents/INC-22-0004-realpage-algorithmic-rent-fixing/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2022-10",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "social-services",
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "competitive-pressure",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation",
        "recommender-systems"
      ],
      "outcomes": {
        "financial_loss": "Estimated billions of dollars in inflated rent payments across millions of apartments",
        "arrests": "None",
        "recovery": "Litigation ongoing",
        "regulatory_action": "DOJ antitrust lawsuit filed November 2024; multiple class-action lawsuits pending"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-22-0002",
      "type": "incident",
      "ait_id": "INC-22-0002",
      "title": "Meta Housing Ad Discrimination DOJ Settlement",
      "slug": "meta-housing-ad-discrimination",
      "url": "https://topaithreats.com/incidents/INC-22-0002-meta-housing-ad-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2022-06",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "social-services",
        "corporate"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "regulatory-gap"
      ],
      "assets_involved": [
        "recommender-systems",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified; settlement focused on injunctive relief rather than monetary damages",
        "arrests": "Not applicable",
        "recovery": "Meta required to develop new ad delivery system for housing ads; Variance Reduction System implemented",
        "regulatory_action": "DOJ settlement agreement requiring algorithmic changes; ongoing compliance monitoring"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-22-0001",
      "type": "incident",
      "ait_id": "INC-22-0001",
      "title": "Drug Discovery AI Repurposed to Generate Toxic Chemical Weapons Compounds",
      "slug": "drug-discovery-ai-toxic-compounds",
      "url": "https://topaithreats.com/incidents/INC-22-0001-drug-discovery-ai-toxic-compounds/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2022-03",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare",
        "government"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "weaponization",
        "insufficient-safety-testing",
        "competitive-pressure"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None (the experiment was a controlled research exercise)",
        "recovery": "Not applicable",
        "regulatory_action": "No direct regulatory action; the paper contributed to ongoing dual-use AI policy discussions"
      },
      "degree": 3
    },
    {
      "id": "incident:INC-21-0001",
      "type": "incident",
      "ait_id": "INC-21-0001",
      "title": "Chatbot Encouraged Man in Plot to Kill Queen Elizabeth II",
      "slug": "chatbot-encouraged-man-in-plot-to-kill-queen-elizabeth-ii",
      "url": "https://topaithreats.com/incidents/INC-21-0001-chatbot-encouraged-man-in-plot-to-kill-queen-elizabeth-ii/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2021-12-25",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "public-safety",
        "government"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "outcomes": {},
      "degree": 3
    },
    {
      "id": "incident:INC-20-0004",
      "type": "incident",
      "ait_id": "INC-20-0004",
      "title": "Pulse Oximeter Racial Bias Propagates into AI Clinical Decision Systems",
      "slug": "pulse-oximeter-racial-bias-ai-propagation",
      "url": "https://topaithreats.com/incidents/INC-20-0004-pulse-oximeter-racial-bias-ai-propagation/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2020-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "global"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "training-data-bias",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "regulatory_action": "FDA draft guidance (January 2025) requiring expanded diversity in pulse oximeter premarket clinical trials and prominent labeling warnings about skin pigmentation effects"
      },
      "degree": 2
    },
    {
      "id": "incident:INC-20-0002",
      "type": "incident",
      "ait_id": "INC-20-0002",
      "title": "UK A-Level Algorithm Downgrades Disadvantaged Students",
      "slug": "uk-a-level-algorithm-grading",
      "url": "https://topaithreats.com/incidents/INC-20-0002-uk-a-level-algorithm-grading/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-08",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "education",
        "government"
      ],
      "affected_groups": [
        "general-public",
        "children"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "over-automation",
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "Not directly quantifiable; significant disruption to university admissions and student plans",
        "arrests": "Not applicable",
        "recovery": "Government U-turn within four days; teacher-assessed grades reinstated",
        "regulatory_action": "Ofqual chair resigned; House of Commons Education Committee conducted parliamentary inquiry"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-20-0003",
      "type": "incident",
      "ait_id": "INC-20-0003",
      "title": "UN-Documented Autonomous Drone Attack in Libya",
      "slug": "libya-autonomous-drone-attack",
      "url": "https://topaithreats.com/incidents/INC-20-0003-libya-autonomous-drone-attack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-03",
      "last_updated": "2026-02-15",
      "regions": [
        "africa"
      ],
      "sectors": [
        "government"
      ],
      "affected_groups": [
        "national-security-systems",
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "weaponization",
        "regulatory-gap"
      ],
      "assets_involved": [
        "autonomous-agents",
        "industrial-control-systems"
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "Renewed international debate on autonomous weapons regulation; no binding treaty adopted"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-20-0001",
      "type": "incident",
      "ait_id": "INC-20-0001",
      "title": "Clearview AI Mass Facial Recognition Scraping",
      "slug": "clearview-ai-mass-surveillance",
      "url": "https://topaithreats.com/incidents/INC-20-0001-clearview-ai-mass-surveillance/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-01",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "biometric-data",
        "training-datasets",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "Multiple GDPR fines totaling over EUR 50 million; banned in several jurisdictions"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-20-0005",
      "type": "incident",
      "ait_id": "INC-20-0005",
      "title": "Robert Williams Wrongful Arrest from Facial Recognition Racial Bias",
      "slug": "robert-williams-facial-recognition-wrongful-arrest",
      "url": "https://topaithreats.com/incidents/INC-20-0005-robert-williams-facial-recognition-wrongful-arrest/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-01",
      "last_updated": "2026-03-28",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "biometric-data",
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "$300,000 settlement plus attorneys' fees",
        "regulatory_action": "Detroit Police Department prohibited from arresting based solely on facial recognition results; photo lineups cannot follow directly from facial recognition without independent evidence; mandatory officer training on facial recognition risks and racial bias; audit of all DPD cases since 2017 where facial recognition was used for arrest warrants",
        "legal_outcome": "Settlement with City of Detroit including the nation's strongest police department facial recognition policy"
      },
      "degree": 9
    },
    {
      "id": "incident:INC-20-0006",
      "type": "incident",
      "ait_id": "INC-20-0006",
      "title": "'Vegetative Electron Microscopy' Nonsense Phrase Contaminates Scientific Literature via AI",
      "slug": "vegetative-electron-microscopy-ai-contamination",
      "url": "https://topaithreats.com/incidents/INC-20-0006-vegetative-electron-microscopy-ai-contamination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2020-01",
      "last_updated": "2026-03-13",
      "regions": [
        "global"
      ],
      "sectors": [
        "education",
        "healthcare"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "training-data-bias"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "regulatory_action": "Contested retractions and corrections at Springer Nature and Elsevier journals"
      },
      "degree": 1
    },
    {
      "id": "incident:INC-19-0001",
      "type": "incident",
      "ait_id": "INC-19-0001",
      "title": "AI Voice Clone CEO Fraud Against UK Energy Company",
      "slug": "deepfake-ceo-voice-uk-energy",
      "url": "https://topaithreats.com/incidents/INC-19-0001-deepfake-ceo-voice-uk-energy/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2019-03",
      "last_updated": "2025-01-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "energy",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "organizational-leaders"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials"
      ],
      "outcomes": {
        "financial_loss": "$243,000 USD (EUR 220,000)",
        "arrests": "None reported; suspects not publicly identified",
        "recovery": "Partial recovery through insurance claim via Euler Hermes",
        "regulatory_action": "None specific to this incident"
      },
      "degree": 7
    },
    {
      "id": "incident:INC-18-0002",
      "type": "incident",
      "ait_id": "INC-18-0002",
      "title": "Amazon AI Recruiting Tool Gender Bias",
      "slug": "amazon-ai-hiring-bias",
      "url": "https://topaithreats.com/incidents/INC-18-0002-amazon-ai-hiring-bias/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2018-10",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment"
      ],
      "affected_groups": [
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "decision-automation",
        "training-datasets"
      ],
      "outcomes": {
        "financial_loss": "Not publicly disclosed",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "Tool scrapped after internal discovery of bias"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-18-0003",
      "type": "incident",
      "ait_id": "INC-18-0003",
      "title": "Boeing 737 MAX MCAS Automation Failures — Two Fatal Crashes",
      "slug": "boeing-737-max-mcas-failures",
      "url": "https://topaithreats.com/incidents/INC-18-0003-boeing-737-max-mcas-failures/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2018-10",
      "last_updated": "2026-02-15",
      "regions": [
        "asia",
        "africa"
      ],
      "sectors": [
        "transportation"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "industrial-control-systems",
        "autonomous-agents"
      ],
      "outcomes": {
        "financial_loss": "$2.5 billion DOJ settlement; estimated total cost to Boeing exceeding $20 billion including grounding, redesign, litigation, and compensation",
        "arrests": "None; DOJ deferred prosecution agreement with Boeing",
        "recovery": "737 MAX returned to service in late 2020 after MCAS redesign and new pilot training requirements",
        "regulatory_action": "Worldwide fleet grounding (March 2019–November 2020); FAA certification process reformed; DOJ $2.5 billion settlement"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-18-0001",
      "type": "incident",
      "ait_id": "INC-18-0001",
      "title": "Uber Autonomous Vehicle Pedestrian Fatality",
      "slug": "uber-self-driving-fatality",
      "url": "https://topaithreats.com/incidents/INC-18-0001-uber-self-driving-fatality/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2018-03",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation",
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "autonomous-agents",
        "industrial-control-systems"
      ],
      "outcomes": {
        "financial_loss": "Not publicly disclosed",
        "arrests": "Safety operator Rafaela Vasquez charged with negligent homicide",
        "recovery": "Uber reached undisclosed settlement with victim's family",
        "regulatory_action": "NTSB issued safety recommendations; Uber suspended testing program"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-17-0001",
      "type": "incident",
      "ait_id": "INC-17-0001",
      "title": "Facebook AI Mistranslation of Arabic Post Leads to Wrongful Arrest in Israel",
      "slug": "facebook-ai-mistranslation-arrest",
      "url": "https://topaithreats.com/incidents/INC-17-0001-facebook-ai-mistranslation-arrest/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2017-10",
      "last_updated": "2026-02-15",
      "regions": [
        "middle-east"
      ],
      "sectors": [
        "corporate",
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not quantified",
        "arrests": "One Palestinian man wrongfully detained for several hours",
        "recovery": "Man released after the translation error was identified",
        "regulatory_action": "No formal regulatory action; incident widely cited in discussions of AI translation bias"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-16-0001",
      "type": "incident",
      "ait_id": "INC-16-0001",
      "title": "Australia Robodebt Automated Welfare Fraud Detection",
      "slug": "robodebt-australia",
      "url": "https://topaithreats.com/incidents/INC-16-0001-robodebt-australia/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2016-07",
      "last_updated": "2025-01-15",
      "regions": [
        "oceania"
      ],
      "sectors": [
        "government",
        "social-services"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "over-automation",
        "model-opacity",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "AUD $1.76 billion in wrongful debt notices",
        "arrests": "None; referrals for civil action against former officials",
        "recovery": "AUD $721 million refunded to affected individuals",
        "regulatory_action": "Royal Commission, scheme declared illegal"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-16-0003",
      "type": "incident",
      "ait_id": "INC-16-0003",
      "title": "COMPAS Recidivism Algorithm Racial Bias",
      "slug": "compas-recidivism-algorithm-bias",
      "url": "https://topaithreats.com/incidents/INC-16-0003-compas-recidivism-algorithm-bias/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2016-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "Not quantifiable; impact measured in unjust pretrial and sentencing outcomes",
        "arrests": "Not applicable",
        "recovery": "COMPAS remains in use in many jurisdictions; some courts have adopted disclosure requirements",
        "regulatory_action": "Wisconsin Supreme Court imposed disclosure requirements; no federal regulation enacted"
      },
      "degree": 4
    },
    {
      "id": "incident:INC-16-0002",
      "type": "incident",
      "ait_id": "INC-16-0002",
      "title": "Microsoft Tay Twitter Chatbot Adversarial Manipulation",
      "slug": "microsoft-tay-twitter-bot",
      "url": "https://topaithreats.com/incidents/INC-16-0002-microsoft-tay-twitter-bot/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2016-03",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "adversarial-attack",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "outcomes": {
        "financial_loss": "Not publicly disclosed",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "None; incident preceded major AI regulatory frameworks"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-13-0001",
      "type": "incident",
      "ait_id": "INC-13-0001",
      "title": "Dutch Childcare Benefits Algorithm Discrimination",
      "slug": "dutch-childcare-benefits-scandal",
      "url": "https://topaithreats.com/incidents/INC-13-0001-dutch-childcare-benefits-scandal/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2013-01",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "social-services"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "children"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "outcomes": {
        "financial_loss": "Over EUR 5.4 billion allocated for compensation",
        "arrests": "None; parliamentary and criminal investigations ongoing",
        "recovery": "Compensation scheme established for over 26,000 affected families",
        "regulatory_action": "Dutch government resigned; families compensated; algorithm banned"
      },
      "degree": 5
    },
    {
      "id": "incident:INC-10-0001",
      "type": "incident",
      "ait_id": "INC-10-0001",
      "title": "2010 Flash Crash — Algorithmic Trading Cascading Failure",
      "slug": "flash-crash-algorithmic-trading",
      "url": "https://topaithreats.com/incidents/INC-10-0001-flash-crash-algorithmic-trading/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2010-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "business-organizations",
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "competitive-pressure"
      ],
      "assets_involved": [
        "financial-systems"
      ],
      "outcomes": {
        "financial_loss": "Nearly $1 trillion in market value temporarily erased; permanent losses difficult to quantify",
        "arrests": "Navinder Singh Sarao arrested in 2015 for market manipulation (spoofing) contributing to the crash",
        "recovery": "Markets largely recovered within 20 minutes; some trades later cancelled",
        "regulatory_action": "SEC/CFTC implemented circuit breakers and single-stock limit up/limit down rules; new regulations on high-frequency trading"
      },
      "degree": 4
    },
    {
      "id": "glossary:accountability",
      "type": "glossary_term",
      "term": "Accountability",
      "slug": "accountability",
      "definition": "The principle that identifiable individuals or organisations must be answerable for AI system outcomes, including harms caused by automated decisions.",
      "url": "https://topaithreats.com/glossary/accountability/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Accountability",
        "https://www.wikidata.org/wiki/Q2798912"
      ],
      "degree": 9
    },
    {
      "id": "glossary:adversarial-attack",
      "type": "glossary_term",
      "term": "Adversarial Attack",
      "slug": "adversarial-attack",
      "definition": "A deliberate manipulation of inputs to a machine learning model designed to cause incorrect outputs, misclassifications, or security bypasses. Adversarial attacks exploit mathematical vulnerabilities in how models process data rather than flaws in traditional software logic.",
      "url": "https://topaithreats.com/glossary/adversarial-attack/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Adversarial_machine_learning",
        "https://www.wikidata.org/wiki/Q20312394"
      ],
      "degree": 28
    },
    {
      "id": "glossary:adversarial-perturbation",
      "type": "glossary_term",
      "term": "Adversarial Perturbation",
      "slug": "adversarial-perturbation",
      "definition": "A carefully calculated modification to an input — often imperceptible to humans — that causes a machine learning model to produce an incorrect or attacker-chosen output. Adversarial perturbations exploit the mathematical properties of neural network decision boundaries rather than flaws in traditional software logic.",
      "url": "https://topaithreats.com/glossary/adversarial-perturbation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Adversarial_machine_learning"
      ],
      "degree": 6
    },
    {
      "id": "glossary:adversarial-training",
      "type": "glossary_term",
      "term": "Adversarial Training",
      "slug": "adversarial-training",
      "definition": "A machine learning defense technique in which a model is trained on adversarial examples — inputs specifically crafted to cause misclassification or incorrect outputs — alongside normal training data, with the goal of improving the model's robustness against adversarial attacks at inference time.",
      "url": "https://topaithreats.com/glossary/adversarial-training/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Adversarial_machine_learning"
      ],
      "degree": 5
    },
    {
      "id": "glossary:agent-framework",
      "type": "glossary_term",
      "term": "Agent Framework",
      "slug": "agent-framework",
      "definition": "A software library or platform that provides the infrastructure for building AI agents — autonomous systems that use large language models to reason, plan, and execute multi-step tasks by invoking tools, managing memory, and coordinating with other agents. Common examples include LangChain, AutoGen, CrewAI, and the OpenAI Agents SDK.",
      "url": "https://topaithreats.com/glossary/agent-framework/",
      "degree": 6
    },
    {
      "id": "glossary:agent-propagation",
      "type": "glossary_term",
      "term": "Agent Propagation",
      "slug": "agent-propagation",
      "definition": "The spread of errors, hallucinations, or adversarial inputs from one AI agent to others in connected multi-agent systems, potentially causing cascading failures.",
      "url": "https://topaithreats.com/glossary/agent-propagation/",
      "degree": 6
    },
    {
      "id": "glossary:agent-safety",
      "type": "glossary_term",
      "term": "Agent Safety",
      "slug": "agent-safety",
      "definition": "The field of ensuring AI agents operate within intended boundaries and do not cause unintended harm through autonomous actions, tool use, or goal pursuit.",
      "url": "https://topaithreats.com/glossary/agent-safety/",
      "degree": 7
    },
    {
      "id": "glossary:agentic-ai",
      "type": "glossary_term",
      "term": "Agentic AI",
      "slug": "agentic-ai",
      "definition": "AI systems that autonomously plan and execute multi-step actions with minimal human oversight.",
      "url": "https://topaithreats.com/glossary/agentic-ai/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Intelligent_agent",
        "https://www.wikidata.org/wiki/Q1142726"
      ],
      "degree": 27
    },
    {
      "id": "glossary:ai-risk-management-framework",
      "type": "glossary_term",
      "term": "AI Risk Management Framework",
      "slug": "ai-risk-management-framework",
      "definition": "A structured methodology published by the US National Institute of Standards and Technology (NIST) that provides organisations with a systematic approach to identifying, assessing, and mitigating risks associated with AI systems throughout their lifecycle. The NIST AI RMF (AI 100-1) is a voluntary, non-sector-specific framework applicable to all AI technologies.",
      "url": "https://topaithreats.com/glossary/ai-risk-management-framework/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Artificial_Intelligence_Risk_Management_Framework"
      ],
      "degree": 7
    },
    {
      "id": "glossary:ai-safety",
      "type": "glossary_term",
      "term": "AI Safety",
      "slug": "ai-safety",
      "definition": "The field of research and practice dedicated to ensuring that artificial intelligence systems operate reliably within intended boundaries and do not cause unintended harm to humans, society, or the environment.",
      "url": "https://topaithreats.com/glossary/ai-safety/",
      "same_as": [
        "https://en.wikipedia.org/wiki/AI_safety"
      ],
      "degree": 35
    },
    {
      "id": "glossary:ai-generated-code",
      "type": "glossary_term",
      "term": "AI-Generated Code",
      "slug": "ai-generated-code",
      "definition": "Code produced by AI systems, which can be used for both legitimate software development and malicious purposes including malware creation and vulnerability exploitation.",
      "url": "https://topaithreats.com/glossary/ai-generated-code/",
      "degree": 6
    },
    {
      "id": "glossary:alert-fatigue",
      "type": "glossary_term",
      "term": "Alert Fatigue",
      "slug": "alert-fatigue",
      "definition": "Desensitisation of human operators to system warnings due to excessive or poorly calibrated alerts, reducing the effectiveness of human oversight over AI systems.",
      "url": "https://topaithreats.com/glossary/alert-fatigue/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Alarm_fatigue",
        "https://www.wikidata.org/wiki/Q16002361"
      ],
      "degree": 7
    },
    {
      "id": "glossary:algorithmic-amplification",
      "type": "glossary_term",
      "term": "Algorithmic Amplification",
      "slug": "algorithmic-amplification",
      "definition": "The process by which recommendation algorithms and content curation systems disproportionately promote certain content, amplifying its reach and societal impact beyond organic levels.",
      "url": "https://topaithreats.com/glossary/algorithmic-amplification/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Algorithmic_amplification"
      ],
      "degree": 8
    },
    {
      "id": "glossary:algorithmic-bias",
      "type": "glossary_term",
      "term": "Algorithmic Bias",
      "slug": "algorithmic-bias",
      "definition": "Systematic errors in AI systems that produce unfair outcomes, often favouring one group over another.",
      "url": "https://topaithreats.com/glossary/algorithmic-bias/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Algorithmic_bias",
        "https://www.wikidata.org/wiki/Q45253460"
      ],
      "degree": 43
    },
    {
      "id": "glossary:algorithmic-trading",
      "type": "glossary_term",
      "term": "Algorithmic Trading",
      "slug": "algorithmic-trading",
      "definition": "The use of AI algorithms to execute financial trades at speeds and volumes exceeding human capability, introducing systemic risks including flash crashes and market manipulation.",
      "url": "https://topaithreats.com/glossary/algorithmic-trading/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Algorithmic_trading",
        "https://www.wikidata.org/wiki/Q139445"
      ],
      "degree": 8
    },
    {
      "id": "glossary:alignment",
      "type": "glossary_term",
      "term": "Alignment",
      "slug": "alignment",
      "definition": "The property of an AI system whose objectives, decision-making processes, and behaviours remain consistent with human values, intentions, and safety requirements. Alignment is a foundational challenge in AI safety research.",
      "url": "https://topaithreats.com/glossary/alignment/",
      "same_as": [
        "https://en.wikipedia.org/wiki/AI_alignment",
        "https://www.wikidata.org/wiki/Q24882728"
      ],
      "degree": 27
    },
    {
      "id": "glossary:allocational-harm",
      "type": "glossary_term",
      "term": "Allocational Harm",
      "slug": "allocational-harm",
      "definition": "Unfair distribution of resources, opportunities, or services when AI systems systematically disadvantage certain groups in consequential decisions such as hiring, lending, or housing.",
      "url": "https://topaithreats.com/glossary/allocational-harm/",
      "degree": 7
    },
    {
      "id": "glossary:anonymization",
      "type": "glossary_term",
      "term": "Anonymization",
      "slug": "anonymization",
      "definition": "The process of removing or obscuring personally identifiable information from datasets to protect individual privacy, which AI techniques can increasingly defeat through re-identification attacks.",
      "url": "https://topaithreats.com/glossary/anonymization/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Data_anonymization",
        "https://www.wikidata.org/wiki/Q17006654"
      ],
      "degree": 8
    },
    {
      "id": "glossary:artificial-general-intelligence",
      "type": "glossary_term",
      "term": "Artificial General Intelligence (AGI)",
      "slug": "artificial-general-intelligence",
      "definition": "A hypothetical AI system capable of performing any intellectual task that a human can, with the ability to transfer learning across domains without task-specific programming.",
      "url": "https://topaithreats.com/glossary/artificial-general-intelligence/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Artificial_general_intelligence",
        "https://www.wikidata.org/wiki/Q5765679"
      ],
      "degree": 8
    },
    {
      "id": "glossary:attack-surface",
      "type": "glossary_term",
      "term": "Attack Surface",
      "slug": "attack-surface",
      "definition": "The totality of entry points, interfaces, and pathways through which an adversary can attempt to interact with, extract data from, or inject inputs into an AI system. In machine learning contexts, the attack surface extends beyond traditional software boundaries to include training pipelines, model APIs, prompt interfaces, tool integrations, and data ingestion channels.",
      "url": "https://topaithreats.com/glossary/attack-surface/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Attack_surface"
      ],
      "degree": 5
    },
    {
      "id": "glossary:attribute-inference",
      "type": "glossary_term",
      "term": "Attribute Inference",
      "slug": "attribute-inference",
      "definition": "Using AI to deduce sensitive personal characteristics such as health status, political affiliation, or sexual orientation from seemingly innocuous data patterns.",
      "url": "https://topaithreats.com/glossary/attribute-inference/",
      "degree": 7
    },
    {
      "id": "glossary:authority-transfer",
      "type": "glossary_term",
      "term": "Authority Transfer",
      "slug": "authority-transfer",
      "definition": "The gradual, often unrecognised shift of decision-making power from humans to AI systems, eroding meaningful human control over consequential outcomes.",
      "url": "https://topaithreats.com/glossary/authority-transfer/",
      "degree": 7
    },
    {
      "id": "glossary:automated-decision-making",
      "type": "glossary_term",
      "term": "Automated Decision-Making",
      "slug": "automated-decision-making",
      "definition": "Using algorithms or AI to make decisions affecting individuals, with limited human review.",
      "url": "https://topaithreats.com/glossary/automated-decision-making/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Automated_decision-making",
        "https://www.wikidata.org/wiki/Q108704339"
      ],
      "degree": 38
    },
    {
      "id": "glossary:automated-exploit",
      "type": "glossary_term",
      "term": "Automated Exploit",
      "slug": "automated-exploit",
      "definition": "AI-driven tools that automatically discover and exploit software vulnerabilities without human intervention, accelerating the pace and scale of cyber attacks.",
      "url": "https://topaithreats.com/glossary/automated-exploit/",
      "degree": 8
    },
    {
      "id": "glossary:automated-vulnerability-discovery",
      "type": "glossary_term",
      "term": "Automated Vulnerability Discovery",
      "slug": "automated-vulnerability-discovery",
      "definition": "Using AI to autonomously identify security weaknesses in software, networks, or systems.",
      "url": "https://topaithreats.com/glossary/automated-vulnerability-discovery/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Vulnerability_(computing)"
      ],
      "degree": 9
    },
    {
      "id": "glossary:automation",
      "type": "glossary_term",
      "term": "Automation",
      "slug": "automation",
      "definition": "The use of AI to perform tasks previously requiring human labour, spanning physical, cognitive, and creative work, with implications for employment and economic structures.",
      "url": "https://topaithreats.com/glossary/automation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Automation",
        "https://www.wikidata.org/wiki/Q184199"
      ],
      "degree": 6
    },
    {
      "id": "glossary:automation-bias",
      "type": "glossary_term",
      "term": "Automation Bias",
      "slug": "automation-bias",
      "definition": "The tendency to favour automated system outputs over independent human judgement, even when those outputs are incorrect.",
      "url": "https://topaithreats.com/glossary/automation-bias/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Automation_bias",
        "https://www.wikidata.org/wiki/Q16948492"
      ],
      "degree": 28
    },
    {
      "id": "glossary:autonomous-vehicle",
      "type": "glossary_term",
      "term": "Autonomous Vehicle",
      "slug": "autonomous-vehicle",
      "definition": "A vehicle using AI to navigate and operate without direct human control.",
      "url": "https://topaithreats.com/glossary/autonomous-vehicle/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Self-driving_car",
        "https://www.wikidata.org/wiki/Q741490"
      ],
      "degree": 8
    },
    {
      "id": "glossary:autonomous-weapons",
      "type": "glossary_term",
      "term": "Autonomous Weapons",
      "slug": "autonomous-weapons",
      "definition": "Weapon systems that use artificial intelligence to select and engage targets without meaningful human control over the critical functions of target identification, tracking, and engagement.",
      "url": "https://topaithreats.com/glossary/autonomous-weapons/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Lethal_autonomous_weapon",
        "https://www.wikidata.org/wiki/Q25378861"
      ],
      "degree": 9
    },
    {
      "id": "glossary:autonomy",
      "type": "glossary_term",
      "term": "Autonomy",
      "slug": "autonomy",
      "definition": "The capacity of individuals to make self-directed decisions free from undue external influence or automated override, which AI systems can undermine through manipulation or substitution.",
      "url": "https://topaithreats.com/glossary/autonomy/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Autonomy",
        "https://www.wikidata.org/wiki/Q484105"
      ],
      "degree": 7
    },
    {
      "id": "glossary:backdoor-attack",
      "type": "glossary_term",
      "term": "Backdoor Attack",
      "slug": "backdoor-attack",
      "definition": "A covert modification to an AI model during training that causes targeted misclassification or malicious behaviour when a specific trigger pattern is present in the input.",
      "url": "https://topaithreats.com/glossary/backdoor-attack/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Backdoor_(computing)",
        "https://www.wikidata.org/wiki/Q254569"
      ],
      "degree": 9
    },
    {
      "id": "glossary:behavioral-profiling",
      "type": "glossary_term",
      "term": "Behavioral Profiling",
      "slug": "behavioral-profiling",
      "definition": "The systematic collection and analysis of individual behaviour patterns by AI systems to predict preferences, intentions, or future actions, often without informed consent.",
      "url": "https://topaithreats.com/glossary/behavioral-profiling/",
      "degree": 11
    },
    {
      "id": "glossary:biological-threat",
      "type": "glossary_term",
      "term": "Biological Threat",
      "slug": "biological-threat",
      "definition": "The risk of AI systems being used to design, enhance, or disseminate biological agents capable of causing widespread harm to human health or ecosystems.",
      "url": "https://topaithreats.com/glossary/biological-threat/",
      "degree": 5
    },
    {
      "id": "glossary:biometric-data",
      "type": "glossary_term",
      "term": "Biometric Data",
      "slug": "biometric-data",
      "definition": "Measurable physical or behavioural characteristics used to identify or authenticate individuals.",
      "url": "https://topaithreats.com/glossary/biometric-data/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Biometrics",
        "https://www.wikidata.org/wiki/Q177765"
      ],
      "degree": 24
    },
    {
      "id": "glossary:biosecurity",
      "type": "glossary_term",
      "term": "Biosecurity",
      "slug": "biosecurity",
      "definition": "The set of measures, policies, and practices designed to protect against biological threats, including the prevention of AI-enabled acceleration of pathogen design, synthesis, or dissemination of dangerous biological knowledge.",
      "url": "https://topaithreats.com/glossary/biosecurity/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Biosecurity",
        "https://www.wikidata.org/wiki/Q803874"
      ],
      "degree": 7
    },
    {
      "id": "glossary:black-box-system",
      "type": "glossary_term",
      "term": "Black-Box System",
      "slug": "black-box-system",
      "definition": "An AI system whose internal decision-making processes are opaque or incomprehensible to users, operators, and auditors, making accountability and error correction difficult.",
      "url": "https://topaithreats.com/glossary/black-box-system/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Black_box",
        "https://www.wikidata.org/wiki/Q29256"
      ],
      "degree": 12
    },
    {
      "id": "glossary:business-email-compromise",
      "type": "glossary_term",
      "term": "Business Email Compromise",
      "slug": "business-email-compromise",
      "definition": "Targeted fraud impersonating executives or trusted contacts to authorise fraudulent transactions.",
      "url": "https://topaithreats.com/glossary/business-email-compromise/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Business_email_compromise",
        "https://www.wikidata.org/wiki/Q4019220"
      ],
      "degree": 6
    },
    {
      "id": "glossary:c2pa",
      "type": "glossary_term",
      "term": "C2PA",
      "slug": "c2pa",
      "definition": "The Coalition for Content Provenance and Authenticity (C2PA) is a technical standards body that develops specifications for certifying the source and history of digital content through cryptographically signed metadata. C2PA content credentials enable verification of whether content was created by a human, edited, or generated by AI.",
      "url": "https://topaithreats.com/glossary/c2pa/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Coalition_for_Content_Provenance_and_Authenticity"
      ],
      "degree": 5
    },
    {
      "id": "glossary:cascading-failure",
      "type": "glossary_term",
      "term": "Cascading Failure",
      "slug": "cascading-failure",
      "definition": "A process in which the failure of one component in an interconnected system triggers a sequence of failures in dependent components, potentially leading to the collapse of an entire system or network of systems.",
      "url": "https://topaithreats.com/glossary/cascading-failure/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Cascading_failure",
        "https://www.wikidata.org/wiki/Q5048226"
      ],
      "degree": 16
    },
    {
      "id": "glossary:chain-of-thought",
      "type": "glossary_term",
      "term": "Chain of Thought",
      "slug": "chain-of-thought",
      "definition": "A prompting and reasoning technique in which a large language model is encouraged to produce intermediate reasoning steps before arriving at a final answer, rather than generating the answer directly. Chain-of-thought reasoning improves accuracy on complex tasks but can also introduce new failure modes including hallucinated reasoning and cascading errors in multi-step processes.",
      "url": "https://topaithreats.com/glossary/chain-of-thought/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Prompt_engineering"
      ],
      "degree": 4
    },
    {
      "id": "glossary:chatbot",
      "type": "glossary_term",
      "term": "Chatbot",
      "slug": "chatbot",
      "definition": "A software application that uses natural language processing or large language models to conduct text-based or voice-based conversations with users, ranging from rule-based systems to general-purpose AI assistants.",
      "url": "https://topaithreats.com/glossary/chatbot/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Chatbot"
      ],
      "degree": 16
    },
    {
      "id": "glossary:complacency",
      "type": "glossary_term",
      "term": "Complacency",
      "slug": "complacency",
      "definition": "A state of reduced vigilance in human operators who develop excessive trust in AI system reliability, leading to failures in oversight and error detection.",
      "url": "https://topaithreats.com/glossary/complacency/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Complacency"
      ],
      "degree": 6
    },
    {
      "id": "glossary:confabulation",
      "type": "glossary_term",
      "term": "Confabulation",
      "slug": "confabulation",
      "definition": "The generation of plausible but factually incorrect information by AI systems, presented with unwarranted confidence.",
      "url": "https://topaithreats.com/glossary/confabulation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Confabulation_(neural_networks)",
        "https://www.wikidata.org/wiki/Q5159625"
      ],
      "degree": 5
    },
    {
      "id": "glossary:consent",
      "type": "glossary_term",
      "term": "Consent",
      "slug": "consent",
      "definition": "The principle that individuals should provide informed, voluntary agreement before their data is collected or processed by AI systems.",
      "url": "https://topaithreats.com/glossary/consent/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Consent",
        "https://www.wikidata.org/wiki/Q231043"
      ],
      "degree": 11
    },
    {
      "id": "glossary:contagion",
      "type": "glossary_term",
      "term": "Contagion",
      "slug": "contagion",
      "definition": "The spread of harmful outputs, compromised states, or adversarial inputs between connected AI agents.",
      "url": "https://topaithreats.com/glossary/contagion/",
      "degree": 5
    },
    {
      "id": "glossary:content-authenticity",
      "type": "glossary_term",
      "term": "Content Authenticity",
      "slug": "content-authenticity",
      "definition": "Standards and technologies for verifying the origin, integrity, and editing history of digital media.",
      "url": "https://topaithreats.com/glossary/content-authenticity/",
      "degree": 12
    },
    {
      "id": "glossary:content-moderation",
      "type": "glossary_term",
      "term": "Content Moderation",
      "slug": "content-moderation",
      "definition": "The process of monitoring, reviewing, and enforcing policies on user-generated or AI-generated content to prevent the distribution of harmful, illegal, or policy-violating material.",
      "url": "https://topaithreats.com/glossary/content-moderation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Content_moderation"
      ],
      "degree": 9
    },
    {
      "id": "glossary:context-injection",
      "type": "glossary_term",
      "term": "Context Injection",
      "slug": "context-injection",
      "definition": "Manipulating an AI agent's context window or retrieved information to influence its reasoning and outputs.",
      "url": "https://topaithreats.com/glossary/context-injection/",
      "degree": 8
    },
    {
      "id": "glossary:context-window",
      "type": "glossary_term",
      "term": "Context Window",
      "slug": "context-window",
      "definition": "The maximum amount of text (measured in tokens) that a large language model can process in a single interaction, encompassing all input (system prompt, conversation history, retrieved documents, tool outputs) and generated output. The context window defines the boundary of what the model can perceive and reason about at any given time.",
      "url": "https://topaithreats.com/glossary/context-window/",
      "degree": 4
    },
    {
      "id": "glossary:coordinated-inauthentic-behavior",
      "type": "glossary_term",
      "term": "Coordinated Inauthentic Behavior",
      "slug": "coordinated-inauthentic-behavior",
      "definition": "Organised networks of fake or compromised accounts using AI to simulate grassroots activity and manipulate public discourse.",
      "url": "https://topaithreats.com/glossary/coordinated-inauthentic-behavior/",
      "degree": 6
    },
    {
      "id": "glossary:coordination-failure",
      "type": "glossary_term",
      "term": "Coordination Failure",
      "slug": "coordination-failure",
      "definition": "When multiple AI agents working toward shared objectives produce unintended or harmful outcomes due to misaligned strategies.",
      "url": "https://topaithreats.com/glossary/coordination-failure/",
      "degree": 6
    },
    {
      "id": "glossary:cyber-espionage",
      "type": "glossary_term",
      "term": "Cyber Espionage",
      "slug": "cyber-espionage",
      "definition": "Covert digital intrusion to access and exfiltrate sensitive data, increasingly augmented by AI.",
      "url": "https://topaithreats.com/glossary/cyber-espionage/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Cyber_espionage",
        "https://www.wikidata.org/wiki/Q1039245"
      ],
      "degree": 10
    },
    {
      "id": "glossary:dark-pattern",
      "type": "glossary_term",
      "term": "Dark Pattern",
      "slug": "dark-pattern",
      "definition": "A deceptive user interface design that manipulates individuals into making decisions they would not otherwise make, increasingly amplified by AI-driven personalisation.",
      "url": "https://topaithreats.com/glossary/dark-pattern/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Dark_pattern",
        "https://www.wikidata.org/wiki/Q30325366"
      ],
      "degree": 9
    },
    {
      "id": "glossary:data-bias",
      "type": "glossary_term",
      "term": "Data Bias",
      "slug": "data-bias",
      "definition": "Systematic errors in training datasets that reflect historical inequities, leading to discriminatory AI outputs.",
      "url": "https://topaithreats.com/glossary/data-bias/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Bias_(statistics)",
        "https://www.wikidata.org/wiki/Q15222032"
      ],
      "degree": 12
    },
    {
      "id": "glossary:data-concentration",
      "type": "glossary_term",
      "term": "Data Concentration",
      "slug": "data-concentration",
      "definition": "The accumulation of vast datasets by a small number of organisations, creating asymmetric advantages and barriers to competition.",
      "url": "https://topaithreats.com/glossary/data-concentration/",
      "degree": 9
    },
    {
      "id": "glossary:data-extraction",
      "type": "glossary_term",
      "term": "Data Extraction",
      "slug": "data-extraction",
      "definition": "Techniques for recovering private training data or sensitive information from AI models through systematic querying.",
      "url": "https://topaithreats.com/glossary/data-extraction/",
      "degree": 8
    },
    {
      "id": "glossary:data-leakage",
      "type": "glossary_term",
      "term": "Data Leakage",
      "slug": "data-leakage",
      "definition": "Unintended exposure of sensitive or personal data, including through AI system inputs or outputs.",
      "url": "https://topaithreats.com/glossary/data-leakage/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Data_breach",
        "https://www.wikidata.org/wiki/Q1172486"
      ],
      "degree": 12
    },
    {
      "id": "glossary:data-poisoning",
      "type": "glossary_term",
      "term": "Data Poisoning",
      "slug": "data-poisoning",
      "definition": "The deliberate corruption or manipulation of training data used to build machine learning models, causing them to learn incorrect patterns, produce biased outputs, or contain hidden backdoors exploitable by an attacker.",
      "url": "https://topaithreats.com/glossary/data-poisoning/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Data_poisoning",
        "https://www.wikidata.org/wiki/Q124511804"
      ],
      "degree": 16
    },
    {
      "id": "glossary:data-protection",
      "type": "glossary_term",
      "term": "Data Protection",
      "slug": "data-protection",
      "definition": "Legal and technical frameworks governing collection, processing, and sharing of personal data.",
      "url": "https://topaithreats.com/glossary/data-protection/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Information_privacy",
        "https://www.wikidata.org/wiki/Q456632"
      ],
      "degree": 17
    },
    {
      "id": "glossary:decision-loop",
      "type": "glossary_term",
      "term": "Decision Loop",
      "slug": "decision-loop",
      "definition": "An automated cycle where AI systems make decisions, observe outcomes, and adjust subsequent decisions without human intervention.",
      "url": "https://topaithreats.com/glossary/decision-loop/",
      "degree": 7
    },
    {
      "id": "glossary:deepfake",
      "type": "glossary_term",
      "term": "Deepfake",
      "slug": "deepfake",
      "definition": "AI-generated synthetic media that convincingly replicates the appearance, voice, or actions of real individuals.",
      "url": "https://topaithreats.com/glossary/deepfake/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Deepfake",
        "https://www.wikidata.org/wiki/Q49473179"
      ],
      "degree": 50
    },
    {
      "id": "glossary:defense-in-depth",
      "type": "glossary_term",
      "term": "Defense in Depth",
      "slug": "defense-in-depth",
      "definition": "A security strategy that employs multiple independent layers of protection so that if one layer fails, subsequent layers continue to provide security. Applied to AI systems, defense in depth combines input validation, output filtering, sandboxing, access controls, monitoring, and human oversight to mitigate threats that no single control can fully address.",
      "url": "https://topaithreats.com/glossary/defense-in-depth/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Defense_in_depth_(computing)",
        "https://www.wikidata.org/wiki/Q1181095"
      ],
      "degree": 6
    },
    {
      "id": "glossary:democratic-integrity",
      "type": "glossary_term",
      "term": "Democratic Integrity",
      "slug": "democratic-integrity",
      "definition": "The preservation of fair, transparent, and trustworthy democratic processes against AI-enabled manipulation and erosion.",
      "url": "https://topaithreats.com/glossary/democratic-integrity/",
      "degree": 6
    },
    {
      "id": "glossary:deskilling",
      "type": "glossary_term",
      "term": "Deskilling",
      "slug": "deskilling",
      "definition": "The reduction of human workers' skills, expertise, and professional judgment as AI systems assume complex cognitive tasks.",
      "url": "https://topaithreats.com/glossary/deskilling/",
      "degree": 6
    },
    {
      "id": "glossary:differential-privacy",
      "type": "glossary_term",
      "term": "Differential Privacy",
      "slug": "differential-privacy",
      "definition": "A mathematical framework that provides measurable privacy guarantees by adding calibrated noise to data or query results, limiting what can be inferred about any individual.",
      "url": "https://topaithreats.com/glossary/differential-privacy/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Differential_privacy",
        "https://www.wikidata.org/wiki/Q5275358"
      ],
      "degree": 5
    },
    {
      "id": "glossary:diffusion-model",
      "type": "glossary_term",
      "term": "Diffusion Model",
      "slug": "diffusion-model",
      "definition": "A class of generative AI model that creates new data by learning to reverse a gradual noising process — starting from random noise and iteratively denoising it into coherent outputs such as images, video, or audio. Diffusion models power leading image generators including Stable Diffusion, DALL-E, and Midjourney.",
      "url": "https://topaithreats.com/glossary/diffusion-model/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Diffusion_model"
      ],
      "degree": 5
    },
    {
      "id": "glossary:digital-monopoly",
      "type": "glossary_term",
      "term": "Digital Monopoly",
      "slug": "digital-monopoly",
      "definition": "Market dominance achieved through control of AI infrastructure, data assets, or foundational models.",
      "url": "https://topaithreats.com/glossary/digital-monopoly/",
      "degree": 7
    },
    {
      "id": "glossary:digital-watermarking",
      "type": "glossary_term",
      "term": "Digital Watermarking",
      "slug": "digital-watermarking",
      "definition": "A technique that embeds imperceptible identifying information into digital content — images, audio, video, or text — to establish provenance, verify authenticity, or detect tampering. In AI contexts, digital watermarking is applied to AI-generated content to enable identification of synthetic media and support content authenticity verification.",
      "url": "https://topaithreats.com/glossary/digital-watermarking/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Digital_watermarking",
        "https://www.wikidata.org/wiki/Q748758"
      ],
      "degree": 5
    },
    {
      "id": "glossary:disinformation",
      "type": "glossary_term",
      "term": "Disinformation",
      "slug": "disinformation",
      "definition": "Deliberately false or misleading information created and spread to deceive, manipulate opinion, or cause harm.",
      "url": "https://topaithreats.com/glossary/disinformation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Disinformation",
        "https://www.wikidata.org/wiki/Q189656"
      ],
      "degree": 25
    },
    {
      "id": "glossary:disparate-impact",
      "type": "glossary_term",
      "term": "Disparate Impact",
      "slug": "disparate-impact",
      "definition": "When an AI system produces significantly different outcomes for different demographic groups, regardless of intent.",
      "url": "https://topaithreats.com/glossary/disparate-impact/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Disparate_impact",
        "https://www.wikidata.org/wiki/Q5282532"
      ],
      "degree": 7
    },
    {
      "id": "glossary:dual-use",
      "type": "glossary_term",
      "term": "Dual-Use",
      "slug": "dual-use",
      "definition": "A characteristic of technologies, tools, or knowledge developed for beneficial purposes that can also be repurposed or exploited for harmful applications, a concept with particular relevance to AI capabilities in cybersecurity, biology, and information manipulation.",
      "url": "https://topaithreats.com/glossary/dual-use/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Dual-use_technology",
        "https://www.wikidata.org/wiki/Q1262529"
      ],
      "degree": 12
    },
    {
      "id": "glossary:elder-fraud",
      "type": "glossary_term",
      "term": "Elder Fraud",
      "slug": "elder-fraud",
      "definition": "Financial crimes targeting older adults, increasingly enabled by AI voice cloning, deepfakes, and automated robocalls.",
      "url": "https://topaithreats.com/glossary/elder-fraud/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Elder_abuse#Financial_abuse",
        "https://www.wikidata.org/wiki/Q427883"
      ],
      "degree": 8
    },
    {
      "id": "glossary:election-interference",
      "type": "glossary_term",
      "term": "Election Interference",
      "slug": "election-interference",
      "definition": "Deliberate efforts to influence democratic elections through disinformation, voter suppression, or manipulation of public discourse.",
      "url": "https://topaithreats.com/glossary/election-interference/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Foreign_electoral_intervention",
        "https://www.wikidata.org/wiki/Q28456918"
      ],
      "degree": 9
    },
    {
      "id": "glossary:emergent-behavior",
      "type": "glossary_term",
      "term": "Emergent Behavior",
      "slug": "emergent-behavior",
      "definition": "Behaviors that arise in AI systems from the interaction of simpler components and that were not explicitly programmed or predicted by their designers.",
      "url": "https://topaithreats.com/glossary/emergent-behavior/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Emergence",
        "https://www.wikidata.org/wiki/Q215772"
      ],
      "degree": 7
    },
    {
      "id": "glossary:engagement-optimization",
      "type": "glossary_term",
      "term": "Engagement Optimization",
      "slug": "engagement-optimization",
      "definition": "AI-driven maximisation of user attention and interaction, often at the expense of content quality and user wellbeing.",
      "url": "https://topaithreats.com/glossary/engagement-optimization/",
      "degree": 7
    },
    {
      "id": "glossary:epistemic-crisis",
      "type": "glossary_term",
      "term": "Epistemic Crisis",
      "slug": "epistemic-crisis",
      "definition": "A societal condition where shared frameworks for establishing truth and knowledge break down.",
      "url": "https://topaithreats.com/glossary/epistemic-crisis/",
      "degree": 8
    },
    {
      "id": "glossary:erasure",
      "type": "glossary_term",
      "term": "Erasure",
      "slug": "erasure",
      "definition": "The systematic invisibility or underrepresentation of certain groups in AI training data, model outputs, or system design, leading to the denial of recognition, resources, or participation.",
      "url": "https://topaithreats.com/glossary/erasure/",
      "degree": 7
    },
    {
      "id": "glossary:evasion-attack",
      "type": "glossary_term",
      "term": "Evasion Attack",
      "slug": "evasion-attack",
      "definition": "Adversarial inputs crafted to cause a deployed AI model to misclassify or fail to detect malicious content, allowing threats to bypass automated defenses.",
      "url": "https://topaithreats.com/glossary/evasion-attack/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Evasion_(machine_learning)"
      ],
      "degree": 8
    },
    {
      "id": "glossary:existential-risk",
      "type": "glossary_term",
      "term": "Existential Risk",
      "slug": "existential-risk",
      "definition": "A risk threatening humanity's long-term survival, in AI contexts linked to misaligned superintelligent systems.",
      "url": "https://topaithreats.com/glossary/existential-risk/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Existential_risk",
        "https://www.wikidata.org/wiki/Q137952474"
      ],
      "degree": 10
    },
    {
      "id": "glossary:explainability",
      "type": "glossary_term",
      "term": "Explainability",
      "slug": "explainability",
      "definition": "The degree to which an AI system's decision-making process can be understood and interpreted by humans, enabling accountability, trust, and regulatory compliance.",
      "url": "https://topaithreats.com/glossary/explainability/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Explainable_artificial_intelligence",
        "https://www.wikidata.org/wiki/Q40890078"
      ],
      "degree": 11
    },
    {
      "id": "glossary:facial-recognition",
      "type": "glossary_term",
      "term": "Facial Recognition",
      "slug": "facial-recognition",
      "definition": "AI technology that identifies or verifies individuals by analysing facial features, with significant privacy and bias concerns.",
      "url": "https://topaithreats.com/glossary/facial-recognition/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Facial_recognition_system",
        "https://www.wikidata.org/wiki/Q1192553"
      ],
      "degree": 18
    },
    {
      "id": "glossary:fairness",
      "type": "glossary_term",
      "term": "Fairness",
      "slug": "fairness",
      "definition": "The principle that AI systems should produce equitable outcomes across individuals and groups, encompassing multiple competing mathematical definitions and sociotechnical considerations.",
      "url": "https://topaithreats.com/glossary/fairness/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Fairness_(machine_learning)",
        "https://www.wikidata.org/wiki/Q80100972"
      ],
      "degree": 14
    },
    {
      "id": "glossary:feedback-loop",
      "type": "glossary_term",
      "term": "Feedback Loop",
      "slug": "feedback-loop",
      "definition": "A cycle where AI system outputs influence the data used for future training or decisions, potentially amplifying biases, errors, or unintended patterns over successive iterations.",
      "url": "https://topaithreats.com/glossary/feedback-loop/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Feedback",
        "https://www.wikidata.org/wiki/Q183635"
      ],
      "degree": 6
    },
    {
      "id": "glossary:fine-tuning",
      "type": "glossary_term",
      "term": "Fine-Tuning",
      "slug": "fine-tuning",
      "definition": "The process of further training a pre-trained machine learning model on a smaller, task-specific or domain-specific dataset to adapt its behaviour, improve its performance on particular tasks, or align it with specific requirements. Fine-tuning modifies the model's weights rather than relying solely on prompt engineering.",
      "url": "https://topaithreats.com/glossary/fine-tuning/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Fine-tuning_(deep_learning)"
      ],
      "degree": 6
    },
    {
      "id": "glossary:flash-crash",
      "type": "glossary_term",
      "term": "Flash Crash",
      "slug": "flash-crash",
      "definition": "An extremely rapid and severe drop in asset prices — typically followed by a quick recovery — caused by the interaction of automated trading systems, algorithmic strategies, or AI-driven market participants that amplify market volatility through cascading automated responses faster than human intervention can arrest them.",
      "url": "https://topaithreats.com/glossary/flash-crash/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Flash_crash",
        "https://www.wikidata.org/wiki/Q1002197"
      ],
      "degree": 4
    },
    {
      "id": "glossary:foundation-model",
      "type": "glossary_term",
      "term": "Foundation Model",
      "slug": "foundation-model",
      "definition": "A large-scale AI model trained on broad data that can be adapted to a wide range of downstream tasks through fine-tuning or prompting.",
      "url": "https://topaithreats.com/glossary/foundation-model/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Foundation_model",
        "https://www.wikidata.org/wiki/Q112702082"
      ],
      "degree": 5
    },
    {
      "id": "glossary:function-calling",
      "type": "glossary_term",
      "term": "Function Calling",
      "slug": "function-calling",
      "definition": "A capability of large language models that allows them to generate structured output requesting the invocation of external functions or tools, with specified parameters, rather than producing only natural language text. Function calling is the mechanism through which LLMs interact with APIs, databases, code interpreters, and other external systems.",
      "url": "https://topaithreats.com/glossary/function-calling/",
      "degree": 7
    },
    {
      "id": "glossary:gdpr",
      "type": "glossary_term",
      "term": "GDPR",
      "slug": "gdpr",
      "definition": "The EU's General Data Protection Regulation establishing comprehensive rules for personal data processing and storage.",
      "url": "https://topaithreats.com/glossary/gdpr/",
      "same_as": [
        "https://en.wikipedia.org/wiki/General_Data_Protection_Regulation",
        "https://www.wikidata.org/wiki/Q1172506"
      ],
      "degree": 11
    },
    {
      "id": "glossary:generative-adversarial-network",
      "type": "glossary_term",
      "term": "Generative Adversarial Network",
      "slug": "generative-adversarial-network",
      "definition": "A class of machine learning architecture consisting of two neural networks — a generator and a discriminator — trained in opposition, where the generator learns to produce synthetic data and the discriminator learns to distinguish synthetic from real data. GANs are a foundational technology behind deepfakes and other synthetic media.",
      "url": "https://topaithreats.com/glossary/generative-adversarial-network/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Generative_adversarial_network",
        "https://www.wikidata.org/wiki/Q26007855"
      ],
      "degree": 5
    },
    {
      "id": "glossary:goal-drift",
      "type": "glossary_term",
      "term": "Goal Drift",
      "slug": "goal-drift",
      "definition": "The gradual divergence of an AI agent's effective objectives from its originally specified goals during extended autonomous operation, resulting in behavior that no longer aligns with its operators' intentions.",
      "url": "https://topaithreats.com/glossary/goal-drift/",
      "degree": 15
    },
    {
      "id": "glossary:goodharts-law",
      "type": "glossary_term",
      "term": "Goodhart's Law",
      "slug": "goodharts-law",
      "definition": "The principle that when a measure becomes a target, it ceases to be a good measure — applied to AI systems, it explains why agents that optimize a proxy metric often fail to achieve the intended objective.",
      "url": "https://topaithreats.com/glossary/goodharts-law/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Goodhart%27s_law"
      ],
      "degree": 6
    },
    {
      "id": "glossary:governance",
      "type": "glossary_term",
      "term": "Governance",
      "slug": "governance",
      "definition": "The frameworks, policies, and institutions through which AI systems are regulated, overseen, and held accountable across their lifecycle from development through deployment and retirement.",
      "url": "https://topaithreats.com/glossary/governance/",
      "same_as": [
        "https://en.wikipedia.org/wiki/AI_governance",
        "https://www.wikidata.org/wiki/Q130610796"
      ],
      "degree": 9
    },
    {
      "id": "glossary:grandparent-scam",
      "type": "glossary_term",
      "term": "Grandparent Scam",
      "slug": "grandparent-scam",
      "definition": "A social engineering fraud using AI voice cloning to impersonate a grandchild and convince older adults to send money.",
      "url": "https://topaithreats.com/glossary/grandparent-scam/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Grandparent_scam",
        "https://www.wikidata.org/wiki/Q62014727"
      ],
      "degree": 7
    },
    {
      "id": "glossary:guardrail",
      "type": "glossary_term",
      "term": "Guardrail",
      "slug": "guardrail",
      "definition": "A safety mechanism — implemented through training constraints, input/output filters, or system-level rules — that restricts an AI system's behavior to prevent harmful, policy-violating, or unintended outputs.",
      "url": "https://topaithreats.com/glossary/guardrail/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Guardrails_(artificial_intelligence)"
      ],
      "degree": 20
    },
    {
      "id": "glossary:hallucination",
      "type": "glossary_term",
      "term": "Hallucination",
      "slug": "hallucination",
      "definition": "The generation of confident but factually incorrect or fabricated output by a language model, including invented citations.",
      "url": "https://topaithreats.com/glossary/hallucination/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Hallucination_(artificial_intelligence)",
        "https://www.wikidata.org/wiki/Q116197048"
      ],
      "degree": 21
    },
    {
      "id": "glossary:human-agency",
      "type": "glossary_term",
      "term": "Human Agency",
      "slug": "human-agency",
      "definition": "The capacity of individuals to make autonomous, informed decisions and exercise meaningful control over actions that affect their lives, increasingly at risk as AI systems assume decision-making authority.",
      "url": "https://topaithreats.com/glossary/human-agency/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Agency_(philosophy)",
        "https://www.wikidata.org/wiki/Q3951828"
      ],
      "degree": 14
    },
    {
      "id": "glossary:human-in-the-loop",
      "type": "glossary_term",
      "term": "Human-in-the-Loop",
      "slug": "human-in-the-loop",
      "definition": "A design principle requiring meaningful human oversight and intervention at critical decision points in AI-driven processes.",
      "url": "https://topaithreats.com/glossary/human-in-the-loop/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Human-in-the-loop",
        "https://www.wikidata.org/wiki/Q5936775"
      ],
      "degree": 35
    },
    {
      "id": "glossary:indirect-prompt-injection",
      "type": "glossary_term",
      "term": "Indirect Prompt Injection",
      "slug": "indirect-prompt-injection",
      "definition": "A class of prompt injection attack where malicious instructions are embedded in external data sources — such as web pages, documents, emails, or database records — that an AI system retrieves and processes, causing the model to execute the attacker's instructions without the user's knowledge or intent.",
      "url": "https://topaithreats.com/glossary/indirect-prompt-injection/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Prompt_injection"
      ],
      "degree": 5
    },
    {
      "id": "glossary:information-ecosystem",
      "type": "glossary_term",
      "term": "Information Ecosystem",
      "slug": "information-ecosystem",
      "definition": "The interconnected network of media, platforms, institutions, and individuals through which information is created, distributed, consumed, and verified within a society.",
      "url": "https://topaithreats.com/glossary/information-ecosystem/",
      "degree": 6
    },
    {
      "id": "glossary:information-integrity",
      "type": "glossary_term",
      "term": "Information Integrity",
      "slug": "information-integrity",
      "definition": "The trustworthiness, accuracy, and reliability of information within digital systems and public discourse, encompassing both the factual correctness of content and the authenticity of its provenance.",
      "url": "https://topaithreats.com/glossary/information-integrity/",
      "degree": 6
    },
    {
      "id": "glossary:infrastructure-dependency",
      "type": "glossary_term",
      "term": "Infrastructure Dependency",
      "slug": "infrastructure-dependency",
      "definition": "Critical reliance of essential services on shared AI systems, creating vulnerability to widespread failure if those systems malfunction, degrade, or become unavailable.",
      "url": "https://topaithreats.com/glossary/infrastructure-dependency/",
      "degree": 6
    },
    {
      "id": "glossary:input-validation",
      "type": "glossary_term",
      "term": "Input Validation",
      "slug": "input-validation",
      "definition": "The process of verifying that data received by an AI system conforms to expected formats, constraints, and safety requirements before it is processed. In AI contexts, input validation extends beyond traditional type-checking to include prompt filtering, injection detection, content policy enforcement, and semantic boundary verification.",
      "url": "https://topaithreats.com/glossary/input-validation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Data_validation"
      ],
      "degree": 5
    },
    {
      "id": "glossary:institutional-trust",
      "type": "glossary_term",
      "term": "Institutional Trust",
      "slug": "institutional-trust",
      "definition": "Public confidence in the reliability, competence, and good faith of societal institutions including government, media, scientific bodies, and the judiciary, which AI-enabled threats can systematically erode.",
      "url": "https://topaithreats.com/glossary/institutional-trust/",
      "degree": 5
    },
    {
      "id": "glossary:instruction-hierarchy",
      "type": "glossary_term",
      "term": "Instruction Hierarchy",
      "slug": "instruction-hierarchy",
      "definition": "A security mechanism for large language models that establishes a priority ordering among different instruction sources — typically system prompt (highest priority), user messages (medium), and retrieved content or tool outputs (lowest) — to prevent lower-priority instructions from overriding higher-priority ones.",
      "url": "https://topaithreats.com/glossary/instruction-hierarchy/",
      "degree": 4
    },
    {
      "id": "glossary:instrumental-convergence",
      "type": "glossary_term",
      "term": "Instrumental Convergence",
      "slug": "instrumental-convergence",
      "definition": "The hypothesis that sufficiently advanced AI systems pursuing a wide range of final goals would converge on acquiring certain instrumental sub-goals — including self-preservation, resource acquisition, and goal stability — because these sub-goals are useful for achieving almost any terminal objective.",
      "url": "https://topaithreats.com/glossary/instrumental-convergence/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Instrumental_convergence"
      ],
      "degree": 6
    },
    {
      "id": "glossary:international-humanitarian-law",
      "type": "glossary_term",
      "term": "International Humanitarian Law",
      "slug": "international-humanitarian-law",
      "definition": "The body of international law governing armed conflict, including rules on distinction, proportionality, and precaution, whose application to AI-enabled weapons systems raises fundamental questions of compliance and accountability.",
      "url": "https://topaithreats.com/glossary/international-humanitarian-law/",
      "same_as": [
        "https://en.wikipedia.org/wiki/International_humanitarian_law",
        "https://www.wikidata.org/wiki/Q381148"
      ],
      "degree": 6
    },
    {
      "id": "glossary:jailbreak-attack",
      "type": "glossary_term",
      "term": "Jailbreak Attack",
      "slug": "jailbreak-attack",
      "definition": "A technique that circumvents an AI model's built-in safety alignment and content policies to elicit restricted or harmful outputs.",
      "url": "https://topaithreats.com/glossary/jailbreak-attack/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Jailbreaking_(large_language_models)"
      ],
      "degree": 13
    },
    {
      "id": "glossary:job-displacement",
      "type": "glossary_term",
      "term": "Job Displacement",
      "slug": "job-displacement",
      "definition": "The elimination, significant degradation, or structural transformation of human employment as AI-driven automation replaces tasks, roles, or entire occupational categories previously performed by workers.",
      "url": "https://topaithreats.com/glossary/job-displacement/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Technological_unemployment",
        "https://www.wikidata.org/wiki/Q7692538"
      ],
      "degree": 10
    },
    {
      "id": "glossary:large-language-model",
      "type": "glossary_term",
      "term": "Large Language Model",
      "slug": "large-language-model",
      "definition": "A neural network trained on massive text datasets to generate, summarise, and reason about natural language.",
      "url": "https://topaithreats.com/glossary/large-language-model/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Large_language_model",
        "https://www.wikidata.org/wiki/Q115305900"
      ],
      "degree": 56
    },
    {
      "id": "glossary:least-privilege",
      "type": "glossary_term",
      "term": "Least Privilege",
      "slug": "least-privilege",
      "definition": "A security principle requiring that any entity — user, process, or AI agent — is granted only the minimum permissions necessary to perform its intended function and no more. Applied to AI systems, least privilege constrains model access to tools, data, APIs, and system resources to reduce the blast radius of compromise or misuse.",
      "url": "https://topaithreats.com/glossary/least-privilege/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Principle_of_least_privilege",
        "https://www.wikidata.org/wiki/Q516475"
      ],
      "degree": 7
    },
    {
      "id": "glossary:lethal-autonomous-weapon",
      "type": "glossary_term",
      "term": "Lethal Autonomous Weapon",
      "slug": "lethal-autonomous-weapon",
      "definition": "A weapon system that can select and engage targets without meaningful human control over the critical functions of target identification, tracking, and attack execution.",
      "url": "https://topaithreats.com/glossary/lethal-autonomous-weapon/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Lethal_autonomous_weapon"
      ],
      "degree": 7
    },
    {
      "id": "glossary:laws",
      "type": "glossary_term",
      "term": "Lethal Autonomous Weapon Systems (LAWS)",
      "slug": "laws",
      "definition": "Weapons systems that can independently select and engage targets without meaningful human control over individual attack decisions, raising fundamental legal, ethical, and security concerns.",
      "url": "https://topaithreats.com/glossary/laws/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Lethal_autonomous_weapon",
        "https://www.wikidata.org/wiki/Q25378861"
      ],
      "degree": 6
    },
    {
      "id": "glossary:liars-dividend",
      "type": "glossary_term",
      "term": "Liar's Dividend",
      "slug": "liars-dividend",
      "definition": "The phenomenon where the mere existence of deepfakes and AI-generated media allows individuals to dismiss authentic evidence — including genuine photographs, videos, and audio recordings — as potentially fabricated. The liar's dividend erodes the evidentiary value of all digital media, benefiting those who wish to deny documented events.",
      "url": "https://topaithreats.com/glossary/liars-dividend/",
      "degree": 5
    },
    {
      "id": "glossary:malware",
      "type": "glossary_term",
      "term": "Malware",
      "slug": "malware",
      "definition": "Malicious software designed to infiltrate, damage, or gain unauthorized access to computer systems. In the context of AI threats, malware increasingly leverages machine learning to evade detection, adapt to defenses, and automate attack strategies.",
      "url": "https://topaithreats.com/glossary/malware/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Malware",
        "https://www.wikidata.org/wiki/Q14001"
      ],
      "degree": 9
    },
    {
      "id": "glossary:manipulative-design",
      "type": "glossary_term",
      "term": "Manipulative Design",
      "slug": "manipulative-design",
      "definition": "Interface patterns that exploit cognitive biases and AI personalisation to steer user behaviour against their interests, undermining informed consent and autonomous decision-making.",
      "url": "https://topaithreats.com/glossary/manipulative-design/",
      "degree": 7
    },
    {
      "id": "glossary:market-manipulation",
      "type": "glossary_term",
      "term": "Market Manipulation",
      "slug": "market-manipulation",
      "definition": "The use of AI systems to artificially influence the price, volume, or conditions of financial markets through algorithmic trading strategies, coordinated information campaigns, or exploitation of market microstructure vulnerabilities.",
      "url": "https://topaithreats.com/glossary/market-manipulation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Market_manipulation",
        "https://www.wikidata.org/wiki/Q400707"
      ],
      "degree": 11
    },
    {
      "id": "glossary:market-power",
      "type": "glossary_term",
      "term": "Market Power",
      "slug": "market-power",
      "definition": "The ability of dominant AI firms to control market conditions, pricing, and access to essential AI infrastructure and data, concentrating economic influence in ways that limit competition and innovation.",
      "url": "https://topaithreats.com/glossary/market-power/",
      "degree": 6
    },
    {
      "id": "glossary:mass-surveillance",
      "type": "glossary_term",
      "term": "Mass Surveillance",
      "slug": "mass-surveillance",
      "definition": "Broad, indiscriminate monitoring of populations using AI technologies such as facial recognition and communications interception.",
      "url": "https://topaithreats.com/glossary/mass-surveillance/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Mass_surveillance",
        "https://www.wikidata.org/wiki/Q1425056"
      ],
      "degree": 17
    },
    {
      "id": "glossary:media-manipulation",
      "type": "glossary_term",
      "term": "Media Manipulation",
      "slug": "media-manipulation",
      "definition": "The deliberate alteration or fabrication of media content using AI to deceive, mislead, or influence public perception, encompassing deepfakes, synthetic text, and manipulated imagery.",
      "url": "https://topaithreats.com/glossary/media-manipulation/",
      "degree": 6
    },
    {
      "id": "glossary:membership-inference",
      "type": "glossary_term",
      "term": "Membership Inference",
      "slug": "membership-inference",
      "definition": "An attack technique that determines whether a specific data record was included in an AI model's training dataset, potentially revealing sensitive information about individuals whose data was used.",
      "url": "https://topaithreats.com/glossary/membership-inference/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Membership_inference_attack"
      ],
      "degree": 6
    },
    {
      "id": "glossary:memory-poisoning",
      "type": "glossary_term",
      "term": "Memory Poisoning",
      "slug": "memory-poisoning",
      "definition": "The deliberate corruption of an AI agent's persistent memory, context window, or stored state to manipulate its future decisions, outputs, or behavior without the agent or its operators detecting the alteration.",
      "url": "https://topaithreats.com/glossary/memory-poisoning/",
      "degree": 8
    },
    {
      "id": "glossary:misalignment",
      "type": "glossary_term",
      "term": "Misalignment",
      "slug": "misalignment",
      "definition": "A condition in which an AI system's operational behaviour diverges from the objectives, values, or intentions specified by its designers, potentially causing unintended harm at varying scales.",
      "url": "https://topaithreats.com/glossary/misalignment/",
      "same_as": [
        "https://en.wikipedia.org/wiki/AI_alignment",
        "https://www.wikidata.org/wiki/Q24882728"
      ],
      "degree": 15
    },
    {
      "id": "glossary:misinformation",
      "type": "glossary_term",
      "term": "Misinformation",
      "slug": "misinformation",
      "definition": "False or inaccurate information spread without deliberate intent to deceive, distinct from disinformation which involves intentional deception. AI-generated hallucinations represent a major and growing source.",
      "url": "https://topaithreats.com/glossary/misinformation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Misinformation",
        "https://www.wikidata.org/wiki/Q13579947"
      ],
      "degree": 17
    },
    {
      "id": "glossary:mitre-atlas",
      "type": "glossary_term",
      "term": "MITRE ATLAS",
      "slug": "mitre-atlas",
      "definition": "The Adversarial Threat Landscape for AI Systems (ATLAS) is a knowledge base maintained by MITRE Corporation that catalogues adversarial tactics, techniques, and procedures (TTPs) targeting machine learning systems. Modelled on the MITRE ATT&CK framework for cybersecurity, ATLAS provides a structured taxonomy of AI-specific attacks with documented case studies.",
      "url": "https://topaithreats.com/glossary/mitre-atlas/",
      "same_as": [
        "https://en.wikipedia.org/wiki/MITRE_Corporation"
      ],
      "degree": 6
    },
    {
      "id": "glossary:model-context-protocol",
      "type": "glossary_term",
      "term": "Model Context Protocol",
      "slug": "model-context-protocol",
      "definition": "An open protocol, developed by Anthropic, that standardises how AI applications connect to external data sources and tools. MCP provides a universal interface for language models to access databases, APIs, file systems, and other services through a client-server architecture, replacing fragmented custom integrations.",
      "url": "https://topaithreats.com/glossary/model-context-protocol/",
      "degree": 6
    },
    {
      "id": "glossary:model-inversion",
      "type": "glossary_term",
      "term": "Model Inversion",
      "slug": "model-inversion",
      "definition": "An attack technique that reconstructs private or sensitive information from a machine learning model's training data by systematically analyzing the model's outputs, predictions, or confidence scores.",
      "url": "https://topaithreats.com/glossary/model-inversion/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Model_inversion_attack"
      ],
      "degree": 7
    },
    {
      "id": "glossary:model-provenance",
      "type": "glossary_term",
      "term": "Model Provenance",
      "slug": "model-provenance",
      "definition": "The documented chain of custody for an AI model — tracing its origin, training data, fine-tuning history, and distribution path to verify integrity and authenticity.",
      "url": "https://topaithreats.com/glossary/model-provenance/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Provenance"
      ],
      "degree": 5
    },
    {
      "id": "glossary:multi-agent-system",
      "type": "glossary_term",
      "term": "Multi-Agent System",
      "slug": "multi-agent-system",
      "definition": "A computational architecture in which multiple autonomous AI agents interact, cooperate, or compete to accomplish tasks. These systems introduce emergent risks from coordination failures, conflicting objectives, and cascading errors between agents.",
      "url": "https://topaithreats.com/glossary/multi-agent-system/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Multi-agent_system",
        "https://www.wikidata.org/wiki/Q529909"
      ],
      "degree": 14
    },
    {
      "id": "glossary:non-consensual-intimate-imagery",
      "type": "glossary_term",
      "term": "Non-Consensual Intimate Imagery",
      "slug": "non-consensual-intimate-imagery",
      "definition": "Sexually explicit images or videos created or distributed without the depicted person's consent, increasingly generated using AI deepfake tools.",
      "url": "https://topaithreats.com/glossary/non-consensual-intimate-imagery/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Non-consensual_intimate_imagery"
      ],
      "degree": 8
    },
    {
      "id": "glossary:output-sandboxing",
      "type": "glossary_term",
      "term": "Output Sandboxing",
      "slug": "output-sandboxing",
      "definition": "A security control that constrains and validates the outputs of an AI system before they are executed, displayed, or passed to downstream systems. Output sandboxing prevents AI-generated content — including code, tool calls, and formatted text — from causing unintended effects outside a controlled environment.",
      "url": "https://topaithreats.com/glossary/output-sandboxing/",
      "degree": 7
    },
    {
      "id": "glossary:overreliance",
      "type": "glossary_term",
      "term": "Overreliance",
      "slug": "overreliance",
      "definition": "Excessive dependence on AI system outputs without adequate independent verification or critical evaluation, leading to unchecked errors and diminished human judgment capacity.",
      "url": "https://topaithreats.com/glossary/overreliance/",
      "degree": 7
    },
    {
      "id": "glossary:owasp-top-10-llm",
      "type": "glossary_term",
      "term": "OWASP Top 10 for LLM Applications",
      "slug": "owasp-top-10-llm",
      "definition": "A security awareness document published by the Open Web Application Security Project (OWASP) that identifies the ten most critical security vulnerabilities specific to applications built on large language models. The list provides standardised vulnerability descriptions, risk ratings, and mitigation guidance for LLM-integrated systems.",
      "url": "https://topaithreats.com/glossary/owasp-top-10-llm/",
      "same_as": [
        "https://en.wikipedia.org/wiki/OWASP"
      ],
      "degree": 6
    },
    {
      "id": "glossary:persistent-memory",
      "type": "glossary_term",
      "term": "Persistent Memory",
      "slug": "persistent-memory",
      "definition": "The capacity of AI agents to retain and recall information across interactions, enabling continuity of context but creating new attack surfaces for data poisoning and unauthorized knowledge accumulation.",
      "url": "https://topaithreats.com/glossary/persistent-memory/",
      "degree": 6
    },
    {
      "id": "glossary:persuasive-technology",
      "type": "glossary_term",
      "term": "Persuasive Technology",
      "slug": "persuasive-technology",
      "definition": "Systems designed to change user attitudes or behaviours through AI-powered personalisation, nudging, and emotional targeting, raising concerns about autonomy and informed consent.",
      "url": "https://topaithreats.com/glossary/persuasive-technology/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Persuasive_technology",
        "https://www.wikidata.org/wiki/Q2902255"
      ],
      "degree": 6
    },
    {
      "id": "glossary:phishing",
      "type": "glossary_term",
      "term": "Phishing",
      "slug": "phishing",
      "definition": "A social engineering attack using fraudulent messages to trick recipients into revealing credentials, installing malware, or transferring funds.",
      "url": "https://topaithreats.com/glossary/phishing/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Phishing",
        "https://www.wikidata.org/wiki/Q135005"
      ],
      "degree": 14
    },
    {
      "id": "glossary:polymorphic-malware",
      "type": "glossary_term",
      "term": "Polymorphic Malware",
      "slug": "polymorphic-malware",
      "definition": "Malicious software that uses AI to continuously alter its code signature while maintaining functionality, evading detection by signature-based and AI-powered security systems.",
      "url": "https://topaithreats.com/glossary/polymorphic-malware/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Polymorphic_code",
        "https://www.wikidata.org/wiki/Q950981"
      ],
      "degree": 6
    },
    {
      "id": "glossary:price-fixing",
      "type": "glossary_term",
      "term": "Price Fixing",
      "slug": "price-fixing",
      "definition": "AI-facilitated coordination of pricing among competitors, whether through explicit collusion or emergent algorithmic convergence that produces cartel-like outcomes without direct human agreement.",
      "url": "https://topaithreats.com/glossary/price-fixing/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Price_fixing",
        "https://www.wikidata.org/wiki/Q1200230"
      ],
      "degree": 5
    },
    {
      "id": "glossary:privilege-escalation",
      "type": "glossary_term",
      "term": "Privilege Escalation",
      "slug": "privilege-escalation",
      "definition": "The exploitation of a system vulnerability or misconfiguration to gain elevated access rights beyond those originally authorized. In AI contexts, this includes AI agents acquiring capabilities or permissions that exceed their intended operational boundaries.",
      "url": "https://topaithreats.com/glossary/privilege-escalation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Privilege_escalation",
        "https://www.wikidata.org/wiki/Q1856893"
      ],
      "degree": 11
    },
    {
      "id": "glossary:profiling",
      "type": "glossary_term",
      "term": "Profiling",
      "slug": "profiling",
      "definition": "The automated processing of personal data to evaluate, categorise, or predict individual characteristics and behaviour, enabling targeted decisions that may affect rights and opportunities.",
      "url": "https://topaithreats.com/glossary/profiling/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Profiling_(information_science)",
        "https://www.wikidata.org/wiki/Q7248282"
      ],
      "degree": 6
    },
    {
      "id": "glossary:prompt-injection",
      "type": "glossary_term",
      "term": "Prompt Injection",
      "slug": "prompt-injection",
      "definition": "An attack that inserts adversarial instructions into an AI model's input to override its intended behaviour, bypass safety constraints, or extract restricted information.",
      "url": "https://topaithreats.com/glossary/prompt-injection/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Prompt_injection",
        "https://www.wikidata.org/wiki/Q116737628"
      ],
      "degree": 39
    },
    {
      "id": "glossary:propaganda",
      "type": "glossary_term",
      "term": "Propaganda",
      "slug": "propaganda",
      "definition": "Deliberately crafted messaging designed to influence public opinion, now amplified by AI-generated content and automated distribution at unprecedented speed and scale.",
      "url": "https://topaithreats.com/glossary/propaganda/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Propaganda",
        "https://www.wikidata.org/wiki/Q7281"
      ],
      "degree": 6
    },
    {
      "id": "glossary:protected-characteristics",
      "type": "glossary_term",
      "term": "Protected Characteristics",
      "slug": "protected-characteristics",
      "definition": "Legally defined attributes such as race, gender, age, disability, and religion that anti-discrimination law prohibits as bases for adverse treatment in decisions affecting individuals.",
      "url": "https://topaithreats.com/glossary/protected-characteristics/",
      "degree": 6
    },
    {
      "id": "glossary:proxy-discrimination",
      "type": "glossary_term",
      "term": "Proxy Discrimination",
      "slug": "proxy-discrimination",
      "definition": "A form of algorithmic discrimination where AI systems use ostensibly neutral variables that correlate with protected characteristics, producing biased outcomes without explicitly referencing protected attributes.",
      "url": "https://topaithreats.com/glossary/proxy-discrimination/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Proxy_discrimination"
      ],
      "degree": 13
    },
    {
      "id": "glossary:proxy-variable",
      "type": "glossary_term",
      "term": "Proxy Variable",
      "slug": "proxy-variable",
      "definition": "A data attribute that correlates with a protected characteristic, enabling indirect algorithmic discrimination even when the protected attribute is excluded.",
      "url": "https://topaithreats.com/glossary/proxy-variable/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Proxy_(statistics)",
        "https://www.wikidata.org/wiki/Q1432581"
      ],
      "degree": 7
    },
    {
      "id": "glossary:pseudonymization",
      "type": "glossary_term",
      "term": "Pseudonymization",
      "slug": "pseudonymization",
      "definition": "Replacing direct identifiers in datasets with artificial identifiers while maintaining data utility, a privacy-enhancing technique required by GDPR but vulnerable to AI-powered re-identification.",
      "url": "https://topaithreats.com/glossary/pseudonymization/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Pseudonymization",
        "https://www.wikidata.org/wiki/Q567618"
      ],
      "degree": 7
    },
    {
      "id": "glossary:re-identification",
      "type": "glossary_term",
      "term": "Re-Identification",
      "slug": "re-identification",
      "definition": "The process of linking supposedly anonymised or de-identified data back to specific individuals, a capability dramatically enhanced by AI techniques that can cross-reference diverse data sources.",
      "url": "https://topaithreats.com/glossary/re-identification/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Data_re-identification",
        "https://www.wikidata.org/wiki/Q30314042"
      ],
      "degree": 14
    },
    {
      "id": "glossary:recommendation-system",
      "type": "glossary_term",
      "term": "Recommendation System",
      "slug": "recommendation-system",
      "definition": "AI systems that suggest content, products, or actions to users based on predicted preferences, shaping information exposure and individual choices at scale.",
      "url": "https://topaithreats.com/glossary/recommendation-system/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Recommender_system",
        "https://www.wikidata.org/wiki/Q554950"
      ],
      "degree": 6
    },
    {
      "id": "glossary:recursive-self-improvement",
      "type": "glossary_term",
      "term": "Recursive Self-Improvement",
      "slug": "recursive-self-improvement",
      "definition": "A theoretical AI capability in which a system iteratively enhances its own architecture or reasoning, potentially leading to rapid capability gains.",
      "url": "https://topaithreats.com/glossary/recursive-self-improvement/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Recursive_self-improvement",
        "https://www.wikidata.org/wiki/Q1768494"
      ],
      "degree": 10
    },
    {
      "id": "glossary:red-teaming",
      "type": "glossary_term",
      "term": "Red Teaming",
      "slug": "red-teaming",
      "definition": "Structured adversarial testing of AI systems to identify vulnerabilities, safety failures, and harmful capabilities before deployment.",
      "url": "https://topaithreats.com/glossary/red-teaming/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Red_team",
        "https://www.wikidata.org/wiki/Q7305396"
      ],
      "degree": 7
    },
    {
      "id": "glossary:remote-code-execution",
      "type": "glossary_term",
      "term": "Remote Code Execution",
      "slug": "remote-code-execution",
      "definition": "A class of security vulnerability that allows an attacker to run arbitrary code on a target system from a remote location. In AI contexts, remote code execution risks arise when language models with code execution capabilities are manipulated through prompt injection or tool misuse to execute attacker-controlled commands.",
      "url": "https://topaithreats.com/glossary/remote-code-execution/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Arbitrary_code_execution",
        "https://www.wikidata.org/wiki/Q4786079"
      ],
      "degree": 9
    },
    {
      "id": "glossary:representation-gap",
      "type": "glossary_term",
      "term": "Representation Gap",
      "slug": "representation-gap",
      "definition": "Significant disparities between groups in training data coverage, leading to AI systems that perform poorly or produce biased outcomes for underrepresented populations.",
      "url": "https://topaithreats.com/glossary/representation-gap/",
      "degree": 6
    },
    {
      "id": "glossary:representational-harm",
      "type": "glossary_term",
      "term": "Representational Harm",
      "slug": "representational-harm",
      "definition": "Harm that occurs when AI systems reinforce stereotypes, erase identities, or demean social groups through biased outputs, even in the absence of direct material consequences.",
      "url": "https://topaithreats.com/glossary/representational-harm/",
      "degree": 13
    },
    {
      "id": "glossary:retrieval-augmented-generation",
      "type": "glossary_term",
      "term": "Retrieval-Augmented Generation (RAG)",
      "slug": "retrieval-augmented-generation",
      "definition": "An architecture that enhances language model responses by retrieving relevant documents from external knowledge bases and including them in the model's context window alongside the user's query.",
      "url": "https://topaithreats.com/glossary/retrieval-augmented-generation/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Retrieval-augmented_generation"
      ],
      "degree": 10
    },
    {
      "id": "glossary:reward-hacking",
      "type": "glossary_term",
      "term": "Reward Hacking",
      "slug": "reward-hacking",
      "definition": "When an AI agent finds unintended ways to maximise its reward signal that satisfy the formal objective but violate the designer's actual intent, exploiting gaps between specified and intended goals.",
      "url": "https://topaithreats.com/glossary/reward-hacking/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Reward_hacking",
        "https://www.wikidata.org/wiki/Q123900749"
      ],
      "degree": 11
    },
    {
      "id": "glossary:rlhf",
      "type": "glossary_term",
      "term": "RLHF (Reinforcement Learning from Human Feedback)",
      "slug": "rlhf",
      "definition": "A training technique that aligns language model behavior with human preferences by using human evaluators to rank model outputs, then training the model to prefer higher-ranked responses.",
      "url": "https://topaithreats.com/glossary/rlhf/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Reinforcement_learning_from_human_feedback"
      ],
      "degree": 7
    },
    {
      "id": "glossary:robocall",
      "type": "glossary_term",
      "term": "Robocall",
      "slug": "robocall",
      "definition": "An automated telephone call delivering a pre-recorded or AI-synthesised message, increasingly used in fraud, scams, and disinformation campaigns.",
      "url": "https://topaithreats.com/glossary/robocall/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Robocall",
        "https://www.wikidata.org/wiki/Q7353270"
      ],
      "degree": 6
    },
    {
      "id": "glossary:robustness",
      "type": "glossary_term",
      "term": "Robustness",
      "slug": "robustness",
      "definition": "The ability of an AI system to maintain correct and reliable performance when faced with adversarial inputs, distribution shifts, or unexpected operating conditions.",
      "url": "https://topaithreats.com/glossary/robustness/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Robustness_(computer_science)",
        "https://www.wikidata.org/wiki/Q2524240"
      ],
      "degree": 9
    },
    {
      "id": "glossary:safety-critical",
      "type": "glossary_term",
      "term": "Safety-Critical",
      "slug": "safety-critical",
      "definition": "Systems where AI failure could result in death, serious injury, or significant environmental damage, requiring the highest standards of testing, oversight, and human control.",
      "url": "https://topaithreats.com/glossary/safety-critical/",
      "degree": 6
    },
    {
      "id": "glossary:self-determination",
      "type": "glossary_term",
      "term": "Self-Determination",
      "slug": "self-determination",
      "definition": "The right and capacity of individuals to make meaningful choices about their own lives without undue influence or constraint from automated systems.",
      "url": "https://topaithreats.com/glossary/self-determination/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Self-determination",
        "https://www.wikidata.org/wiki/Q156595"
      ],
      "degree": 6
    },
    {
      "id": "glossary:self-replication",
      "type": "glossary_term",
      "term": "Self-Replication",
      "slug": "self-replication",
      "definition": "The ability of an AI system to autonomously create copies of itself, including its model weights, code, or operational configuration, on new compute infrastructure without explicit human authorisation. Self-replication is an emergent capability concern for advanced AI systems, particularly agentic systems with access to code execution and network resources.",
      "url": "https://topaithreats.com/glossary/self-replication/",
      "degree": 5
    },
    {
      "id": "glossary:sensitive-data",
      "type": "glossary_term",
      "term": "Sensitive Data",
      "slug": "sensitive-data",
      "definition": "Personal information revealing racial origin, political opinions, health status, sexual orientation, or other characteristics that require heightened protection under data protection law.",
      "url": "https://topaithreats.com/glossary/sensitive-data/",
      "degree": 7
    },
    {
      "id": "glossary:single-point-of-failure",
      "type": "glossary_term",
      "term": "Single Point of Failure",
      "slug": "single-point-of-failure",
      "definition": "A component whose failure causes an entire system to stop functioning, particularly concerning when AI systems or their underlying infrastructure become critical dependencies without adequate redundancy.",
      "url": "https://topaithreats.com/glossary/single-point-of-failure/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Single_point_of_failure",
        "https://www.wikidata.org/wiki/Q1363179"
      ],
      "degree": 5
    },
    {
      "id": "glossary:smishing",
      "type": "glossary_term",
      "term": "Smishing",
      "slug": "smishing",
      "definition": "A phishing attack conducted via SMS text messages, often using AI to generate convincing, contextually relevant lures.",
      "url": "https://topaithreats.com/glossary/smishing/",
      "same_as": [
        "https://en.wikipedia.org/wiki/SMS_phishing",
        "https://www.wikidata.org/wiki/Q2571495"
      ],
      "degree": 5
    },
    {
      "id": "glossary:social-engineering",
      "type": "glossary_term",
      "term": "Social Engineering",
      "slug": "social-engineering",
      "definition": "Psychological manipulation techniques that exploit human trust, authority, and urgency to trick individuals into revealing credentials, authorizing transactions, or granting system access.",
      "url": "https://topaithreats.com/glossary/social-engineering/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Social_engineering_(security)"
      ],
      "degree": 8
    },
    {
      "id": "glossary:social-scoring",
      "type": "glossary_term",
      "term": "Social Scoring",
      "slug": "social-scoring",
      "definition": "AI systems that assign scores to individuals based on behaviour, social connections, or personal characteristics, used to determine access to services, opportunities, or freedoms.",
      "url": "https://topaithreats.com/glossary/social-scoring/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Social_Credit_System"
      ],
      "degree": 6
    },
    {
      "id": "glossary:specification-gaming",
      "type": "glossary_term",
      "term": "Specification Gaming",
      "slug": "specification-gaming",
      "definition": "A failure mode in which an AI system finds an unintended way to achieve high scores on its specified objective without fulfilling the designer's actual intent. The system exploits loopholes, ambiguities, or oversights in the reward function or evaluation criteria to satisfy the literal specification while violating its spirit.",
      "url": "https://topaithreats.com/glossary/specification-gaming/",
      "degree": 6
    },
    {
      "id": "glossary:stereotyping",
      "type": "glossary_term",
      "term": "Stereotyping",
      "slug": "stereotyping",
      "definition": "AI systems reproducing or amplifying oversimplified, generalised characterisations of social groups in their outputs, reinforcing harmful preconceptions at scale.",
      "url": "https://topaithreats.com/glossary/stereotyping/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Stereotype",
        "https://www.wikidata.org/wiki/Q167172"
      ],
      "degree": 6
    },
    {
      "id": "glossary:superintelligence",
      "type": "glossary_term",
      "term": "Superintelligence",
      "slug": "superintelligence",
      "definition": "A hypothetical AI system that surpasses human cognitive ability across virtually all domains, including reasoning, planning, and social intelligence.",
      "url": "https://topaithreats.com/glossary/superintelligence/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Superintelligence",
        "https://www.wikidata.org/wiki/Q1566000"
      ],
      "degree": 11
    },
    {
      "id": "glossary:supply-chain-attack",
      "type": "glossary_term",
      "term": "Supply Chain Attack",
      "slug": "supply-chain-attack",
      "definition": "An attack that compromises a system by tampering with upstream components — model weights, datasets, software packages, or tool configurations — before they reach the deploying organization.",
      "url": "https://topaithreats.com/glossary/supply-chain-attack/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Supply_chain_attack"
      ],
      "degree": 13
    },
    {
      "id": "glossary:synthetic-identity",
      "type": "glossary_term",
      "term": "Synthetic Identity",
      "slug": "synthetic-identity",
      "definition": "A fabricated identity constructed by combining real and fictitious personal information — such as genuine Social Security numbers with fake names and addresses — or by using AI-generated biometric data (face images, voice prints) to create a persona that does not correspond to any real individual but can pass identity verification systems.",
      "url": "https://topaithreats.com/glossary/synthetic-identity/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Synthetic_identity_theft"
      ],
      "degree": 4
    },
    {
      "id": "glossary:synthetic-media",
      "type": "glossary_term",
      "term": "Synthetic Media",
      "slug": "synthetic-media",
      "definition": "Media content — video, audio, images, or text — wholly or partially generated or manipulated by AI.",
      "url": "https://topaithreats.com/glossary/synthetic-media/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Synthetic_media",
        "https://www.wikidata.org/wiki/Q96407327"
      ],
      "degree": 41
    },
    {
      "id": "glossary:system-prompt",
      "type": "glossary_term",
      "term": "System Prompt",
      "slug": "system-prompt",
      "definition": "A set of instructions provided to a language model by the application developer that defines the model's role, behavior constraints, and operational context — distinct from user input but processed in the same token stream.",
      "url": "https://topaithreats.com/glossary/system-prompt/",
      "degree": 7
    },
    {
      "id": "glossary:systemic-risk",
      "type": "glossary_term",
      "term": "Systemic Risk",
      "slug": "systemic-risk",
      "definition": "The risk that failure, disruption, or unintended behaviour in one component of the AI ecosystem propagates across interconnected systems and institutions, causing widespread harm that exceeds the sum of individual failures.",
      "url": "https://topaithreats.com/glossary/systemic-risk/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Systemic_risk",
        "https://www.wikidata.org/wiki/Q1369234"
      ],
      "degree": 5
    },
    {
      "id": "glossary:tracking",
      "type": "glossary_term",
      "term": "Tracking",
      "slug": "tracking",
      "definition": "Continuous monitoring of individual location, activity, or digital behaviour by AI systems, often conducted without meaningful consent or awareness.",
      "url": "https://topaithreats.com/glossary/tracking/",
      "degree": 6
    },
    {
      "id": "glossary:training-data",
      "type": "glossary_term",
      "term": "Training Data",
      "slug": "training-data",
      "definition": "The datasets used to train machine learning models, whose quality and representativeness directly influence model behaviour, biases, and harms.",
      "url": "https://topaithreats.com/glossary/training-data/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Training,_validation,_and_test_data_sets",
        "https://www.wikidata.org/wiki/Q3985153"
      ],
      "degree": 31
    },
    {
      "id": "glossary:transfer-learning",
      "type": "glossary_term",
      "term": "Transfer Learning",
      "slug": "transfer-learning",
      "definition": "A machine learning technique where a model trained on one task or dataset is adapted to perform a different but related task, leveraging the knowledge acquired during initial training. Transfer learning is the foundational principle behind fine-tuning and the use of pre-trained foundation models across diverse applications.",
      "url": "https://topaithreats.com/glossary/transfer-learning/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Transfer_learning",
        "https://www.wikidata.org/wiki/Q7833851"
      ],
      "degree": 5
    },
    {
      "id": "glossary:trust-erosion",
      "type": "glossary_term",
      "term": "Trust Erosion",
      "slug": "trust-erosion",
      "definition": "The cumulative degradation of public confidence in institutions, media, information systems, and shared epistemic frameworks, accelerated by the proliferation of AI-generated synthetic content and automated manipulation.",
      "url": "https://topaithreats.com/glossary/trust-erosion/",
      "degree": 10
    },
    {
      "id": "glossary:vendor-lock-in",
      "type": "glossary_term",
      "term": "Vendor Lock-In",
      "slug": "vendor-lock-in",
      "definition": "Dependency on a single AI provider's proprietary models, tools, or infrastructure that creates prohibitively high switching costs and reduces organisational autonomy.",
      "url": "https://topaithreats.com/glossary/vendor-lock-in/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Vendor_lock-in",
        "https://www.wikidata.org/wiki/Q1503227"
      ],
      "degree": 6
    },
    {
      "id": "glossary:vishing",
      "type": "glossary_term",
      "term": "Vishing",
      "slug": "vishing",
      "definition": "Voice phishing -- a social engineering attack via telephone, increasingly using AI voice cloning to impersonate trusted individuals.",
      "url": "https://topaithreats.com/glossary/vishing/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Voice_phishing",
        "https://www.wikidata.org/wiki/Q172380"
      ],
      "degree": 13
    },
    {
      "id": "glossary:voice-cloning",
      "type": "glossary_term",
      "term": "Voice Cloning",
      "slug": "voice-cloning",
      "definition": "AI technology that replicates a specific individual's voice to generate realistic synthetic speech.",
      "url": "https://topaithreats.com/glossary/voice-cloning/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Speech_synthesis#Voice_cloning",
        "https://www.wikidata.org/wiki/Q16346"
      ],
      "degree": 30
    },
    {
      "id": "glossary:vulnerability-discovery",
      "type": "glossary_term",
      "term": "Vulnerability Discovery",
      "slug": "vulnerability-discovery",
      "definition": "The use of AI to automatically identify security weaknesses in software, networks, or systems, a dual-use capability that serves both defenders and attackers.",
      "url": "https://topaithreats.com/glossary/vulnerability-discovery/",
      "degree": 7
    },
    {
      "id": "glossary:zero-day",
      "type": "glossary_term",
      "term": "Zero-Day",
      "slug": "zero-day",
      "definition": "A software vulnerability unknown to the vendor and without an available patch, whose discovery by AI dramatically accelerates exploitation timelines and increases the threat to unprotected systems.",
      "url": "https://topaithreats.com/glossary/zero-day/",
      "same_as": [
        "https://en.wikipedia.org/wiki/Zero-day_(computing)"
      ],
      "degree": 6
    }
  ],
  "edges": [
    {
      "source": "domain:DOM-AGT",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-AGT-agent-to-agent-propagation",
      "target": "domain:DOM-AGT",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-AGT",
      "target": "sub:DOM-AGT-cascading-hallucinations",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-AGT-cascading-hallucinations",
      "target": "domain:DOM-AGT",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-AGT",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-AGT-goal-drift",
      "target": "domain:DOM-AGT",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-AGT",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "domain:DOM-AGT",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-AGT",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-AGT-multi-agent-coordination-failures",
      "target": "domain:DOM-AGT",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-AGT",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "domain:DOM-AGT",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-AGT",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "target": "domain:DOM-AGT",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SOC",
      "target": "sub:DOM-SOC-algorithmic-amplification",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SOC-algorithmic-amplification",
      "target": "domain:DOM-SOC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SOC",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SOC-allocational-harm",
      "target": "domain:DOM-SOC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SOC",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SOC-data-imbalance-bias",
      "target": "domain:DOM-SOC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SOC",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SOC-proxy-discrimination",
      "target": "domain:DOM-SOC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SOC",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SOC-representational-harm",
      "target": "domain:DOM-SOC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-ECO",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-ECO-automation-induced-job-degradation",
      "target": "domain:DOM-ECO",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-ECO",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-ECO-decision-loop-automation",
      "target": "domain:DOM-ECO",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-ECO",
      "target": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "target": "domain:DOM-ECO",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-ECO",
      "target": "sub:DOM-ECO-market-manipulation-via-ai",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-ECO-market-manipulation-via-ai",
      "target": "domain:DOM-ECO",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-ECO",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-ECO-power-data-concentration",
      "target": "domain:DOM-ECO",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-CTL",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-CTL-overreliance-automation-bias",
      "target": "domain:DOM-CTL",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-CTL",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "target": "domain:DOM-CTL",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-CTL",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-CTL-implicit-authority-transfer",
      "target": "domain:DOM-CTL",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-CTL",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-CTL-loss-of-human-agency",
      "target": "domain:DOM-CTL",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-CTL",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-CTL-safety-governance-override",
      "target": "domain:DOM-CTL",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-CTL",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "target": "domain:DOM-CTL",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-INF",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "domain:DOM-INF",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-INF",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-INF-consensus-reality-erosion",
      "target": "domain:DOM-INF",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-INF",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "domain:DOM-INF",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-INF",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-INF-disinformation-campaigns",
      "target": "domain:DOM-INF",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-INF",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-INF-misinformation-hallucinated-content",
      "target": "domain:DOM-INF",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-INF",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-INF-synthetic-media-manipulation",
      "target": "domain:DOM-INF",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-PRI",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "target": "domain:DOM-PRI",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-PRI",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-PRI-biometric-exploitation",
      "target": "domain:DOM-PRI",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-PRI",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-PRI-mass-surveillance-amplification",
      "target": "domain:DOM-PRI",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-PRI",
      "target": "sub:DOM-PRI-re-identification-attacks",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-PRI-re-identification-attacks",
      "target": "domain:DOM-PRI",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-PRI",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-PRI-sensitive-attribute-inference",
      "target": "domain:DOM-PRI",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-adversarial-evasion",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-ai-morphed-malware",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-social-engineering-via-ai",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-automated-vulnerability-discovery",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-data-poisoning",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-model-inversion-data-extraction",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SEC",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "domain:DOM-SEC",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SYS",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "target": "domain:DOM-SYS",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SYS",
      "target": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "target": "domain:DOM-SYS",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SYS",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "target": "domain:DOM-SYS",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SYS",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "target": "domain:DOM-SYS",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SYS",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SYS-strategic-misalignment",
      "target": "domain:DOM-SYS",
      "relationship": "belongs_to"
    },
    {
      "source": "domain:DOM-SYS",
      "target": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "relationship": "has_pattern"
    },
    {
      "source": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "target": "domain:DOM-SYS",
      "relationship": "belongs_to"
    },
    {
      "source": "incident:INC-26-0097",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0097",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0097",
      "target": "glossary:job-displacement",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0097",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0097",
      "target": "glossary:agentic-ai",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0074",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0074",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0074",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0015",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0015",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0015",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0059",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0059",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0059",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0094",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0094",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0094",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0094",
      "target": "glossary:guardrail",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0043",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0043",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0043",
      "target": "glossary:agentic-ai",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0043",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0065",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0065",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0065",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0065",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0047",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0047",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0047",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0047",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0072",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0072",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0091",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0091",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0091",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0091",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0095",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0095",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0095",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0095",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0095",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0095",
      "target": "glossary:guardrail",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0042",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0042",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0042",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0051",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0051",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0051",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0066",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0066",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0066",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0075",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0075",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0075",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0075",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0086",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0086",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0086",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0087",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0087",
      "target": "glossary:data-poisoning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0087",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0087",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0089",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0089",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0089",
      "target": "glossary:remote-code-execution",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0096",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0096",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0096",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0029",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0029",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0029",
      "target": "glossary:lethal-autonomous-weapon",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0029",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0029",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0027",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0027",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0027",
      "target": "glossary:job-displacement",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0027",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0027",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0027",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0092",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0092",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0092",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0092",
      "target": "glossary:guardrail",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0092",
      "target": "glossary:misalignment",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0003",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0003",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0003",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0004",
      "target": "sub:DOM-PRI-re-identification-attacks",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0004",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0004",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0004",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0001",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0001",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0001",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0001",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0001",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0001",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0032",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0032",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0032",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0032",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0032",
      "target": "glossary:misalignment",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0032",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0026",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0026",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0026",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0026",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0026",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0026",
      "target": "glossary:content-moderation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0061",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0061",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0025",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0025",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0058",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0058",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0058",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0058",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0058",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0078",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0078",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0006",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0006",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0006",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0006",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0006",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0007",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0007",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0007",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0007",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0007",
      "target": "glossary:data-leakage",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0014",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0014",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0014",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0016",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0016",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0016",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0016",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0021",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0021",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0019",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0028",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0028",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0028",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0028",
      "target": "glossary:lethal-autonomous-weapon",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0028",
      "target": "glossary:misalignment",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0034",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0034",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0034",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0034",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0034",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0034",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0036",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0036",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0036",
      "target": "glossary:dual-use",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0036",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0041",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0070",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0070",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0070",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0073",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0073",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0073",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0040",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0040",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0040",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0040",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0040",
      "target": "glossary:data-extraction",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0040",
      "target": "glossary:data-concentration",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0044",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0044",
      "target": "glossary:autonomous-vehicle",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0044",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0044",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0035",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0035",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0035",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0035",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0035",
      "target": "glossary:lethal-autonomous-weapon",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0045",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0045",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0045",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0045",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0005",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0005",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0010",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0010",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0010",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0010",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0010",
      "target": "glossary:misinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0013",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0013",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0013",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0013",
      "target": "glossary:remote-code-execution",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0017",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0017",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0017",
      "target": "glossary:remote-code-execution",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0017",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0020",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0020",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0020",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0023",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0023",
      "target": "glossary:privilege-escalation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0022",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0022",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0022",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0022",
      "target": "glossary:remote-code-execution",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0031",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0031",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0031",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0031",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0031",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0031",
      "target": "glossary:content-moderation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0046",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0046",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0046",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0046",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0050",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0050",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0050",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0050",
      "target": "glossary:data-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0050",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0052",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0052",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0052",
      "target": "glossary:mass-surveillance",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0052",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0052",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0055",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0055",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0055",
      "target": "glossary:remote-code-execution",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0055",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0056",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0056",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0056",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0056",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0062",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0062",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0062",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0062",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0063",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0063",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0063",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0069",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0069",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0069",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0069",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0076",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0076",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0076",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0076",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0083",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0083",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0083",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0090",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0090",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0090",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0090",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0068",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0068",
      "target": "glossary:mass-surveillance",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0068",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0077",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-26-0077",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-26-0077",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0077",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-26-0077",
      "target": "glossary:mass-surveillance",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0048",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0048",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0048",
      "target": "glossary:guardrail",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0016",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0020",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0026",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0026",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0033",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0033",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0033",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0033",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0033",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0036",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0036",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0036",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0038",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0038",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0038",
      "target": "glossary:non-consensual-intimate-imagery",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0038",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0038",
      "target": "glossary:content-moderation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0038",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0010",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0010",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0010",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0010",
      "target": "glossary:agentic-ai",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0010",
      "target": "glossary:multi-agent-system",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0039",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0039",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0039",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0039",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0039",
      "target": "glossary:content-moderation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0039",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0046",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0046",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0046",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0019",
      "target": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0022",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0037",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0037",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0037",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0037",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0037",
      "target": "glossary:content-moderation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0037",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0001",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0001",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0001",
      "target": "glossary:agentic-ai",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0001",
      "target": "glossary:cyber-espionage",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0001",
      "target": "glossary:automated-vulnerability-discovery",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0011",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0011",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0014",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0014",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0043",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0043",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0043",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0007",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0007",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0007",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0007",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0007",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0008",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0008",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0008",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0008",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0008",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0008",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0013",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0013",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0005",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0005",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0005",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0005",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0005",
      "target": "glossary:jailbreak-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0005",
      "target": "glossary:data-leakage",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0006",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0006",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0006",
      "target": "glossary:data-leakage",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0015",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0015",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0015",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0021",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0021",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0041",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0041",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0041",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0041",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0041",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0045",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0045",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0045",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0045",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0004",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0004",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0004",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0004",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0004",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0004",
      "target": "glossary:data-leakage",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0017",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0025",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0025",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0035",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0035",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0035",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0035",
      "target": "glossary:remote-code-execution",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0012",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0012",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0024",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0024",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0024",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0024",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0030",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0030",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0030",
      "target": "glossary:reward-hacking",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0030",
      "target": "glossary:goal-drift",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0030",
      "target": "glossary:misalignment",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0032",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0032",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0032",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0032",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0031",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0031",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0031",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0031",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0031",
      "target": "glossary:retrieval-augmented-generation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0028",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0028",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0028",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0029",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0029",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0029",
      "target": "glossary:jailbreak-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0002",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0002",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0002",
      "target": "glossary:gdpr",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0002",
      "target": "glossary:data-protection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0003",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0003",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0003",
      "target": "glossary:mass-surveillance",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0018",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0018",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0027",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0027",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0027",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0027",
      "target": "glossary:data-poisoning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0027",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0034",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0034",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0034",
      "target": "sub:DOM-SYS-ai-capability-proliferation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0034",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0040",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0040",
      "target": "glossary:non-consensual-intimate-imagery",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0040",
      "target": "glossary:representational-harm",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0042",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0042",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0042",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0042",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0042",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0042",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0044",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0044",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-25-0044",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0044",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0044",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0047",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-25-0047",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-25-0047",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0027",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0027",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0013",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0013",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0013",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0013",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0013",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0021",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0021",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0021",
      "target": "glossary:autonomous-vehicle",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0011",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0011",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0015",
      "target": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0015",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0015",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0015",
      "target": "glossary:artificial-general-intelligence",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0020",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0020",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0020",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0020",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0014",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0022",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0024",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0006",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0006",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0006",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0006",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0006",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0019",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0019",
      "target": "glossary:behavioral-profiling",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0023",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0023",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0016",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0018",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0018",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0012",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0012",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0012",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0012",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0017",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0017",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0026",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0026",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0009",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0009",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0009",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0009",
      "target": "glossary:representational-harm",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0010",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0010",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0010",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0001",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0001",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0001",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0001",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0001",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0001",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0001",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0002",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0002",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0002",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0002",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0002",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0002",
      "target": "glossary:robocall",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0002",
      "target": "glossary:election-interference",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0003",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0003",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0003",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0003",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0003",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "glossary:elder-fraud",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "glossary:robocall",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0004",
      "target": "glossary:vishing",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0007",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0007",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0007",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0007",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0007",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0007",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0008",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-24-0008",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-24-0008",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0008",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0008",
      "target": "glossary:non-consensual-intimate-imagery",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-24-0025",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0011",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0011",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0011",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0013",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0013",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0013",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0013",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0013",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0015",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0015",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0015",
      "target": "glossary:misinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0015",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0008",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0008",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0008",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0008",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0008",
      "target": "glossary:non-consensual-intimate-imagery",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0007",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0007",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0007",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0007",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0007",
      "target": "glossary:election-interference",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0012",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0012",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0012",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0012",
      "target": "glossary:data-protection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0006",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0006",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0006",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0006",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0006",
      "target": "glossary:phishing",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0006",
      "target": "glossary:business-email-compromise",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0005",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0005",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0005",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0005",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0010",
      "target": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0010",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0002",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0002",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0002",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0002",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0002",
      "target": "glossary:data-leakage",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0003",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0003",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0003",
      "target": "glossary:gdpr",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0003",
      "target": "glossary:data-protection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0003",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0004",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0004",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0004",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0004",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0004",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0004",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0004",
      "target": "glossary:grandparent-scam",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0016",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0016",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0016",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0016",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0016",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0016",
      "target": "glossary:jailbreak-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0016",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0001",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0001",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0001",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0001",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0001",
      "target": "glossary:vishing",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0001",
      "target": "glossary:smishing",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0014",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0014",
      "target": "sub:DOM-PRI-re-identification-attacks",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0014",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0014",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0017",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0017",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-23-0018",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-23-0018",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-23-0018",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-22-0003",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-22-0003",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-22-0003",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-22-0005",
      "target": "sub:DOM-AGT-cascading-hallucinations",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-22-0005",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-22-0005",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-22-0005",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-22-0005",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-22-0004",
      "target": "sub:DOM-ECO-market-manipulation-via-ai",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-22-0004",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-22-0004",
      "target": "glossary:market-manipulation",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-22-0002",
      "target": "sub:DOM-SOC-algorithmic-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-22-0002",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-22-0002",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-22-0001",
      "target": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-22-0001",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-22-0001",
      "target": "glossary:dual-use",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-21-0001",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-21-0001",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-21-0001",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-20-0004",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-20-0004",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-20-0002",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-20-0002",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-20-0002",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0002",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0003",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-20-0003",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-20-0003",
      "target": "glossary:lethal-autonomous-weapon",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0003",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0001",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-20-0001",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-20-0001",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0001",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0001",
      "target": "glossary:mass-surveillance",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "glossary:data-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "glossary:proxy-discrimination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0005",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-20-0006",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-19-0001",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-19-0001",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-19-0001",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-19-0001",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-19-0001",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-19-0001",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-19-0001",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-18-0002",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-18-0002",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-18-0002",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-18-0002",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-18-0003",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-18-0003",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-18-0003",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-18-0003",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-18-0003",
      "target": "glossary:autonomous-vehicle",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-18-0001",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-18-0001",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-18-0001",
      "target": "glossary:autonomous-vehicle",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-18-0001",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-17-0001",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-17-0001",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-17-0001",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-17-0001",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-16-0001",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-16-0001",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-16-0001",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-16-0001",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-16-0003",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-16-0003",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-16-0003",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-16-0003",
      "target": "glossary:proxy-discrimination",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-16-0002",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-16-0002",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-16-0002",
      "target": "glossary:chatbot",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-16-0002",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-16-0002",
      "target": "glossary:large-language-model",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-13-0001",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-13-0001",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-13-0001",
      "target": "glossary:proxy-variable",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-13-0001",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-13-0001",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-10-0001",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "primary_pattern"
    },
    {
      "source": "incident:INC-10-0001",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "secondary_pattern"
    },
    {
      "source": "incident:INC-10-0001",
      "target": "glossary:algorithmic-trading",
      "relationship": "references_term"
    },
    {
      "source": "incident:INC-10-0001",
      "target": "glossary:cascading-failure",
      "relationship": "references_term"
    },
    {
      "source": "sub:DOM-AGT-agent-to-agent-propagation",
      "target": "glossary:agent-propagation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:agent-propagation",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-agent-to-agent-propagation",
      "target": "glossary:multi-agent-system",
      "relationship": "references_term"
    },
    {
      "source": "glossary:multi-agent-system",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-agent-to-agent-propagation",
      "target": "glossary:contagion",
      "relationship": "references_term"
    },
    {
      "source": "glossary:contagion",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-cascading-hallucinations",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "glossary:hallucination",
      "target": "sub:DOM-AGT-cascading-hallucinations",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-cascading-hallucinations",
      "target": "glossary:cascading-failure",
      "relationship": "references_term"
    },
    {
      "source": "glossary:cascading-failure",
      "target": "sub:DOM-AGT-cascading-hallucinations",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-cascading-hallucinations",
      "target": "glossary:information-integrity",
      "relationship": "references_term"
    },
    {
      "source": "glossary:information-integrity",
      "target": "sub:DOM-AGT-cascading-hallucinations",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-goal-drift",
      "target": "glossary:goal-drift",
      "relationship": "references_term"
    },
    {
      "source": "glossary:goal-drift",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-goal-drift",
      "target": "glossary:reward-hacking",
      "relationship": "references_term"
    },
    {
      "source": "glossary:reward-hacking",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-goal-drift",
      "target": "glossary:alignment",
      "relationship": "references_term"
    },
    {
      "source": "glossary:alignment",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "glossary:memory-poisoning",
      "relationship": "references_term"
    },
    {
      "source": "glossary:memory-poisoning",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "glossary:context-injection",
      "relationship": "references_term"
    },
    {
      "source": "glossary:context-injection",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "glossary:persistent-memory",
      "relationship": "references_term"
    },
    {
      "source": "glossary:persistent-memory",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "glossary:prompt-injection",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "glossary:retrieval-augmented-generation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:retrieval-augmented-generation",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-multi-agent-coordination-failures",
      "target": "glossary:multi-agent-system",
      "relationship": "references_term"
    },
    {
      "source": "glossary:multi-agent-system",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-multi-agent-coordination-failures",
      "target": "glossary:emergent-behavior",
      "relationship": "references_term"
    },
    {
      "source": "glossary:emergent-behavior",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-multi-agent-coordination-failures",
      "target": "glossary:coordination-failure",
      "relationship": "references_term"
    },
    {
      "source": "glossary:coordination-failure",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "glossary:reward-hacking",
      "relationship": "references_term"
    },
    {
      "source": "glossary:reward-hacking",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "glossary:goal-drift",
      "relationship": "references_term"
    },
    {
      "source": "glossary:goal-drift",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "glossary:alignment",
      "relationship": "references_term"
    },
    {
      "source": "glossary:alignment",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "glossary:goodharts-law",
      "relationship": "references_term"
    },
    {
      "source": "glossary:goodharts-law",
      "target": "sub:DOM-AGT-specification-gaming",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "target": "glossary:privilege-escalation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:privilege-escalation",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "target": "glossary:agent-safety",
      "relationship": "references_term"
    },
    {
      "source": "glossary:agent-safety",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-algorithmic-amplification",
      "target": "glossary:algorithmic-amplification",
      "relationship": "references_term"
    },
    {
      "source": "glossary:algorithmic-amplification",
      "target": "sub:DOM-SOC-algorithmic-amplification",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-algorithmic-amplification",
      "target": "glossary:recommendation-system",
      "relationship": "references_term"
    },
    {
      "source": "glossary:recommendation-system",
      "target": "sub:DOM-SOC-algorithmic-amplification",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-algorithmic-amplification",
      "target": "glossary:engagement-optimization",
      "relationship": "references_term"
    },
    {
      "source": "glossary:engagement-optimization",
      "target": "sub:DOM-SOC-algorithmic-amplification",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-allocational-harm",
      "target": "glossary:allocational-harm",
      "relationship": "references_term"
    },
    {
      "source": "glossary:allocational-harm",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-allocational-harm",
      "target": "glossary:algorithmic-bias",
      "relationship": "references_term"
    },
    {
      "source": "glossary:algorithmic-bias",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-allocational-harm",
      "target": "glossary:disparate-impact",
      "relationship": "references_term"
    },
    {
      "source": "glossary:disparate-impact",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-data-imbalance-bias",
      "target": "glossary:data-bias",
      "relationship": "references_term"
    },
    {
      "source": "glossary:data-bias",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-data-imbalance-bias",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "glossary:training-data",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-data-imbalance-bias",
      "target": "glossary:representation-gap",
      "relationship": "references_term"
    },
    {
      "source": "glossary:representation-gap",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-proxy-discrimination",
      "target": "glossary:proxy-discrimination",
      "relationship": "references_term"
    },
    {
      "source": "glossary:proxy-discrimination",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-proxy-discrimination",
      "target": "glossary:protected-characteristics",
      "relationship": "references_term"
    },
    {
      "source": "glossary:protected-characteristics",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-proxy-discrimination",
      "target": "glossary:fairness",
      "relationship": "references_term"
    },
    {
      "source": "glossary:fairness",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-representational-harm",
      "target": "glossary:representational-harm",
      "relationship": "references_term"
    },
    {
      "source": "glossary:representational-harm",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-representational-harm",
      "target": "glossary:stereotyping",
      "relationship": "references_term"
    },
    {
      "source": "glossary:stereotyping",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SOC-representational-harm",
      "target": "glossary:erasure",
      "relationship": "references_term"
    },
    {
      "source": "glossary:erasure",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-automation-induced-job-degradation",
      "target": "glossary:automation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:automation",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-automation-induced-job-degradation",
      "target": "glossary:job-displacement",
      "relationship": "references_term"
    },
    {
      "source": "glossary:job-displacement",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-automation-induced-job-degradation",
      "target": "glossary:deskilling",
      "relationship": "references_term"
    },
    {
      "source": "glossary:deskilling",
      "target": "sub:DOM-ECO-automation-induced-job-degradation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-decision-loop-automation",
      "target": "glossary:decision-loop",
      "relationship": "references_term"
    },
    {
      "source": "glossary:decision-loop",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-decision-loop-automation",
      "target": "glossary:automated-decision-making",
      "relationship": "references_term"
    },
    {
      "source": "glossary:automated-decision-making",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-decision-loop-automation",
      "target": "glossary:feedback-loop",
      "relationship": "references_term"
    },
    {
      "source": "glossary:feedback-loop",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "target": "glossary:black-box-system",
      "relationship": "references_term"
    },
    {
      "source": "glossary:black-box-system",
      "target": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "target": "glossary:explainability",
      "relationship": "references_term"
    },
    {
      "source": "glossary:explainability",
      "target": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "target": "glossary:vendor-lock-in",
      "relationship": "references_term"
    },
    {
      "source": "glossary:vendor-lock-in",
      "target": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-market-manipulation-via-ai",
      "target": "glossary:market-manipulation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:market-manipulation",
      "target": "sub:DOM-ECO-market-manipulation-via-ai",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-market-manipulation-via-ai",
      "target": "glossary:algorithmic-trading",
      "relationship": "references_term"
    },
    {
      "source": "glossary:algorithmic-trading",
      "target": "sub:DOM-ECO-market-manipulation-via-ai",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-market-manipulation-via-ai",
      "target": "glossary:price-fixing",
      "relationship": "references_term"
    },
    {
      "source": "glossary:price-fixing",
      "target": "sub:DOM-ECO-market-manipulation-via-ai",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-power-data-concentration",
      "target": "glossary:data-concentration",
      "relationship": "references_term"
    },
    {
      "source": "glossary:data-concentration",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-power-data-concentration",
      "target": "glossary:market-power",
      "relationship": "references_term"
    },
    {
      "source": "glossary:market-power",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-ECO-power-data-concentration",
      "target": "glossary:digital-monopoly",
      "relationship": "references_term"
    },
    {
      "source": "glossary:digital-monopoly",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-overreliance-automation-bias",
      "target": "glossary:automation-bias",
      "relationship": "references_term"
    },
    {
      "source": "glossary:automation-bias",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-overreliance-automation-bias",
      "target": "glossary:overreliance",
      "relationship": "references_term"
    },
    {
      "source": "glossary:overreliance",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-overreliance-automation-bias",
      "target": "glossary:complacency",
      "relationship": "references_term"
    },
    {
      "source": "glossary:complacency",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "target": "glossary:dark-pattern",
      "relationship": "references_term"
    },
    {
      "source": "glossary:dark-pattern",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "target": "glossary:manipulative-design",
      "relationship": "references_term"
    },
    {
      "source": "glossary:manipulative-design",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "target": "glossary:persuasive-technology",
      "relationship": "references_term"
    },
    {
      "source": "glossary:persuasive-technology",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-implicit-authority-transfer",
      "target": "glossary:authority-transfer",
      "relationship": "references_term"
    },
    {
      "source": "glossary:authority-transfer",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-implicit-authority-transfer",
      "target": "glossary:governance",
      "relationship": "references_term"
    },
    {
      "source": "glossary:governance",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-implicit-authority-transfer",
      "target": "glossary:accountability",
      "relationship": "references_term"
    },
    {
      "source": "glossary:accountability",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-loss-of-human-agency",
      "target": "glossary:human-agency",
      "relationship": "references_term"
    },
    {
      "source": "glossary:human-agency",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-loss-of-human-agency",
      "target": "glossary:autonomy",
      "relationship": "references_term"
    },
    {
      "source": "glossary:autonomy",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-loss-of-human-agency",
      "target": "glossary:self-determination",
      "relationship": "references_term"
    },
    {
      "source": "glossary:self-determination",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-safety-governance-override",
      "target": "glossary:ai-safety",
      "relationship": "references_term"
    },
    {
      "source": "glossary:ai-safety",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-safety-governance-override",
      "target": "glossary:guardrail",
      "relationship": "references_term"
    },
    {
      "source": "glossary:guardrail",
      "target": "sub:DOM-CTL-safety-governance-override",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "target": "glossary:human-in-the-loop",
      "relationship": "references_term"
    },
    {
      "source": "glossary:human-in-the-loop",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "target": "glossary:alert-fatigue",
      "relationship": "references_term"
    },
    {
      "source": "glossary:alert-fatigue",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "target": "glossary:safety-critical",
      "relationship": "references_term"
    },
    {
      "source": "glossary:safety-critical",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "glossary:deepfake",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "glossary:social-engineering",
      "relationship": "references_term"
    },
    {
      "source": "glossary:social-engineering",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "glossary:phishing",
      "relationship": "references_term"
    },
    {
      "source": "glossary:phishing",
      "target": "sub:DOM-INF-ai-enabled-fraud",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-consensus-reality-erosion",
      "target": "glossary:epistemic-crisis",
      "relationship": "references_term"
    },
    {
      "source": "glossary:epistemic-crisis",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-consensus-reality-erosion",
      "target": "glossary:information-ecosystem",
      "relationship": "references_term"
    },
    {
      "source": "glossary:information-ecosystem",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-consensus-reality-erosion",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "glossary:synthetic-media",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "glossary:deepfake",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "glossary:synthetic-media",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "glossary:biometric-data",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-disinformation-campaigns",
      "target": "glossary:disinformation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:disinformation",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-disinformation-campaigns",
      "target": "glossary:coordinated-inauthentic-behavior",
      "relationship": "references_term"
    },
    {
      "source": "glossary:coordinated-inauthentic-behavior",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-disinformation-campaigns",
      "target": "glossary:propaganda",
      "relationship": "references_term"
    },
    {
      "source": "glossary:propaganda",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-misinformation-hallucinated-content",
      "target": "glossary:hallucination",
      "relationship": "references_term"
    },
    {
      "source": "glossary:hallucination",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-misinformation-hallucinated-content",
      "target": "glossary:misinformation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:misinformation",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-misinformation-hallucinated-content",
      "target": "glossary:confabulation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:confabulation",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-synthetic-media-manipulation",
      "target": "glossary:synthetic-media",
      "relationship": "references_term"
    },
    {
      "source": "glossary:synthetic-media",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-synthetic-media-manipulation",
      "target": "glossary:media-manipulation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:media-manipulation",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-INF-synthetic-media-manipulation",
      "target": "glossary:content-authenticity",
      "relationship": "references_term"
    },
    {
      "source": "glossary:content-authenticity",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "target": "glossary:behavioral-profiling",
      "relationship": "references_term"
    },
    {
      "source": "glossary:behavioral-profiling",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "target": "glossary:consent",
      "relationship": "references_term"
    },
    {
      "source": "glossary:consent",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "target": "glossary:tracking",
      "relationship": "references_term"
    },
    {
      "source": "glossary:tracking",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-biometric-exploitation",
      "target": "glossary:biometric-data",
      "relationship": "references_term"
    },
    {
      "source": "glossary:biometric-data",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-biometric-exploitation",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "glossary:facial-recognition",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-biometric-exploitation",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-mass-surveillance-amplification",
      "target": "glossary:mass-surveillance",
      "relationship": "references_term"
    },
    {
      "source": "glossary:mass-surveillance",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-mass-surveillance-amplification",
      "target": "glossary:facial-recognition",
      "relationship": "references_term"
    },
    {
      "source": "glossary:facial-recognition",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-mass-surveillance-amplification",
      "target": "glossary:social-scoring",
      "relationship": "references_term"
    },
    {
      "source": "glossary:social-scoring",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-re-identification-attacks",
      "target": "glossary:re-identification",
      "relationship": "references_term"
    },
    {
      "source": "glossary:re-identification",
      "target": "sub:DOM-PRI-re-identification-attacks",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-re-identification-attacks",
      "target": "glossary:anonymization",
      "relationship": "references_term"
    },
    {
      "source": "glossary:anonymization",
      "target": "sub:DOM-PRI-re-identification-attacks",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-re-identification-attacks",
      "target": "glossary:pseudonymization",
      "relationship": "references_term"
    },
    {
      "source": "glossary:pseudonymization",
      "target": "sub:DOM-PRI-re-identification-attacks",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-sensitive-attribute-inference",
      "target": "glossary:attribute-inference",
      "relationship": "references_term"
    },
    {
      "source": "glossary:attribute-inference",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-sensitive-attribute-inference",
      "target": "glossary:sensitive-data",
      "relationship": "references_term"
    },
    {
      "source": "glossary:sensitive-data",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-PRI-sensitive-attribute-inference",
      "target": "glossary:profiling",
      "relationship": "references_term"
    },
    {
      "source": "glossary:profiling",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-adversarial-evasion",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "glossary:adversarial-attack",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-adversarial-evasion",
      "target": "glossary:evasion-attack",
      "relationship": "references_term"
    },
    {
      "source": "glossary:evasion-attack",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-adversarial-evasion",
      "target": "glossary:robustness",
      "relationship": "references_term"
    },
    {
      "source": "glossary:robustness",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "glossary:supply-chain-attack",
      "relationship": "references_term"
    },
    {
      "source": "glossary:supply-chain-attack",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "glossary:backdoor-attack",
      "relationship": "references_term"
    },
    {
      "source": "glossary:backdoor-attack",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "glossary:model-provenance",
      "relationship": "references_term"
    },
    {
      "source": "glossary:model-provenance",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "glossary:data-poisoning",
      "relationship": "references_term"
    },
    {
      "source": "glossary:data-poisoning",
      "target": "sub:DOM-SEC-ai-supply-chain-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-ai-morphed-malware",
      "target": "glossary:malware",
      "relationship": "references_term"
    },
    {
      "source": "glossary:malware",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-ai-morphed-malware",
      "target": "glossary:polymorphic-malware",
      "relationship": "references_term"
    },
    {
      "source": "glossary:polymorphic-malware",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-ai-morphed-malware",
      "target": "glossary:ai-generated-code",
      "relationship": "references_term"
    },
    {
      "source": "glossary:ai-generated-code",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-social-engineering-via-ai",
      "target": "glossary:social-engineering",
      "relationship": "references_term"
    },
    {
      "source": "glossary:social-engineering",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-social-engineering-via-ai",
      "target": "glossary:phishing",
      "relationship": "references_term"
    },
    {
      "source": "glossary:phishing",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-social-engineering-via-ai",
      "target": "glossary:deepfake",
      "relationship": "references_term"
    },
    {
      "source": "glossary:deepfake",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-social-engineering-via-ai",
      "target": "glossary:voice-cloning",
      "relationship": "references_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-automated-vulnerability-discovery",
      "target": "glossary:vulnerability-discovery",
      "relationship": "references_term"
    },
    {
      "source": "glossary:vulnerability-discovery",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-automated-vulnerability-discovery",
      "target": "glossary:zero-day",
      "relationship": "references_term"
    },
    {
      "source": "glossary:zero-day",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-automated-vulnerability-discovery",
      "target": "glossary:automated-exploit",
      "relationship": "references_term"
    },
    {
      "source": "glossary:automated-exploit",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-data-poisoning",
      "target": "glossary:data-poisoning",
      "relationship": "references_term"
    },
    {
      "source": "glossary:data-poisoning",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-data-poisoning",
      "target": "glossary:backdoor-attack",
      "relationship": "references_term"
    },
    {
      "source": "glossary:backdoor-attack",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-data-poisoning",
      "target": "glossary:training-data",
      "relationship": "references_term"
    },
    {
      "source": "glossary:training-data",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "target": "glossary:jailbreak-attack",
      "relationship": "references_term"
    },
    {
      "source": "glossary:jailbreak-attack",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "target": "glossary:alignment",
      "relationship": "references_term"
    },
    {
      "source": "glossary:alignment",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "target": "glossary:rlhf",
      "relationship": "references_term"
    },
    {
      "source": "glossary:rlhf",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "target": "glossary:guardrail",
      "relationship": "references_term"
    },
    {
      "source": "glossary:guardrail",
      "target": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-model-inversion-data-extraction",
      "target": "glossary:model-inversion",
      "relationship": "references_term"
    },
    {
      "source": "glossary:model-inversion",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-model-inversion-data-extraction",
      "target": "glossary:data-extraction",
      "relationship": "references_term"
    },
    {
      "source": "glossary:data-extraction",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-model-inversion-data-extraction",
      "target": "glossary:membership-inference",
      "relationship": "references_term"
    },
    {
      "source": "glossary:membership-inference",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "glossary:prompt-injection",
      "relationship": "references_term"
    },
    {
      "source": "glossary:prompt-injection",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "glossary:system-prompt",
      "relationship": "references_term"
    },
    {
      "source": "glossary:system-prompt",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "glossary:retrieval-augmented-generation",
      "relationship": "references_term"
    },
    {
      "source": "glossary:retrieval-augmented-generation",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "glossary:adversarial-attack",
      "relationship": "references_term"
    },
    {
      "source": "glossary:adversarial-attack",
      "target": "sub:DOM-SEC-prompt-injection-attack",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "target": "glossary:trust-erosion",
      "relationship": "references_term"
    },
    {
      "source": "glossary:trust-erosion",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "target": "glossary:institutional-trust",
      "relationship": "references_term"
    },
    {
      "source": "glossary:institutional-trust",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "target": "glossary:democratic-integrity",
      "relationship": "references_term"
    },
    {
      "source": "glossary:democratic-integrity",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "target": "glossary:biosecurity",
      "relationship": "references_term"
    },
    {
      "source": "glossary:biosecurity",
      "target": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "target": "glossary:dual-use",
      "relationship": "references_term"
    },
    {
      "source": "glossary:dual-use",
      "target": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "target": "glossary:biological-threat",
      "relationship": "references_term"
    },
    {
      "source": "glossary:biological-threat",
      "target": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "target": "glossary:infrastructure-dependency",
      "relationship": "references_term"
    },
    {
      "source": "glossary:infrastructure-dependency",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "target": "glossary:cascading-failure",
      "relationship": "references_term"
    },
    {
      "source": "glossary:cascading-failure",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "target": "glossary:single-point-of-failure",
      "relationship": "references_term"
    },
    {
      "source": "glossary:single-point-of-failure",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "target": "glossary:autonomous-weapons",
      "relationship": "references_term"
    },
    {
      "source": "glossary:autonomous-weapons",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "target": "glossary:laws",
      "relationship": "references_term"
    },
    {
      "source": "glossary:laws",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "target": "glossary:international-humanitarian-law",
      "relationship": "references_term"
    },
    {
      "source": "glossary:international-humanitarian-law",
      "target": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-strategic-misalignment",
      "target": "glossary:alignment",
      "relationship": "references_term"
    },
    {
      "source": "glossary:alignment",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-strategic-misalignment",
      "target": "glossary:misalignment",
      "relationship": "references_term"
    },
    {
      "source": "glossary:misalignment",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-strategic-misalignment",
      "target": "glossary:existential-risk",
      "relationship": "references_term"
    },
    {
      "source": "glossary:existential-risk",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "target": "glossary:recursive-self-improvement",
      "relationship": "references_term"
    },
    {
      "source": "glossary:recursive-self-improvement",
      "target": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "target": "glossary:superintelligence",
      "relationship": "references_term"
    },
    {
      "source": "glossary:superintelligence",
      "target": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "target": "glossary:existential-risk",
      "relationship": "references_term"
    },
    {
      "source": "glossary:existential-risk",
      "target": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "relationship": "explains"
    },
    {
      "source": "sub:DOM-AGT-agent-to-agent-propagation",
      "target": "sub:DOM-AGT-cascading-hallucinations",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-agent-to-agent-propagation",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-cascading-hallucinations",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-cascading-hallucinations",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-goal-drift",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-goal-drift",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-memory-poisoning",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-multi-agent-coordination-failures",
      "target": "sub:DOM-AGT-agent-to-agent-propagation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-multi-agent-coordination-failures",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-specification-gaming",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-algorithmic-amplification",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-algorithmic-amplification",
      "target": "sub:DOM-SOC-representational-harm",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-allocational-harm",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-allocational-harm",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-data-imbalance-bias",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-data-imbalance-bias",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-proxy-discrimination",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-proxy-discrimination",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-representational-harm",
      "target": "sub:DOM-SOC-algorithmic-amplification",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SOC-representational-harm",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-automation-induced-job-degradation",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-automation-induced-job-degradation",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-decision-loop-automation",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-decision-loop-automation",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-market-manipulation-via-ai",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-market-manipulation-via-ai",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-power-data-concentration",
      "target": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-ECO-power-data-concentration",
      "target": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-overreliance-automation-bias",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-overreliance-automation-bias",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "target": "sub:DOM-SOC-algorithmic-amplification",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-implicit-authority-transfer",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-implicit-authority-transfer",
      "target": "sub:DOM-ECO-economic-dependency-on-black-box-systems",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-loss-of-human-agency",
      "target": "sub:DOM-CTL-implicit-authority-transfer",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-loss-of-human-agency",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-safety-governance-override",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-safety-governance-override",
      "target": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-safety-governance-override",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "target": "sub:DOM-CTL-overreliance-automation-bias",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "target": "sub:DOM-ECO-decision-loop-automation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-ai-enabled-fraud",
      "target": "sub:DOM-SEC-social-engineering-via-ai",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-consensus-reality-erosion",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-consensus-reality-erosion",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-deepfake-identity-hijacking",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-disinformation-campaigns",
      "target": "sub:DOM-INF-misinformation-hallucinated-content",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-disinformation-campaigns",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-misinformation-hallucinated-content",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-misinformation-hallucinated-content",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-synthetic-media-manipulation",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-INF-synthetic-media-manipulation",
      "target": "sub:DOM-INF-disinformation-campaigns",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "target": "sub:DOM-CTL-deceptive-manipulative-interfaces",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-biometric-exploitation",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-biometric-exploitation",
      "target": "sub:DOM-PRI-mass-surveillance-amplification",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-mass-surveillance-amplification",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-mass-surveillance-amplification",
      "target": "sub:DOM-PRI-behavioral-profiling-without-consent",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-re-identification-attacks",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-re-identification-attacks",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-sensitive-attribute-inference",
      "target": "sub:DOM-SOC-proxy-discrimination",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-PRI-sensitive-attribute-inference",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-adversarial-evasion",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-adversarial-evasion",
      "target": "sub:DOM-PRI-biometric-exploitation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "sub:DOM-SEC-data-poisoning",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-ai-supply-chain-attack",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-ai-morphed-malware",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-ai-morphed-malware",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-social-engineering-via-ai",
      "target": "sub:DOM-INF-deepfake-identity-hijacking",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-social-engineering-via-ai",
      "target": "sub:DOM-INF-synthetic-media-manipulation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-automated-vulnerability-discovery",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-automated-vulnerability-discovery",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-data-poisoning",
      "target": "sub:DOM-SEC-ai-morphed-malware",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-data-poisoning",
      "target": "sub:DOM-SOC-data-imbalance-bias",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-jailbreak-guardrail-bypass",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-model-inversion-data-extraction",
      "target": "sub:DOM-PRI-re-identification-attacks",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-model-inversion-data-extraction",
      "target": "sub:DOM-PRI-sensitive-attribute-inference",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "sub:DOM-SEC-adversarial-evasion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "sub:DOM-SEC-model-inversion-data-extraction",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SEC-prompt-injection-attack",
      "target": "sub:DOM-AGT-memory-poisoning",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "target": "sub:DOM-INF-consensus-reality-erosion",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-accumulative-risk-trust-erosion",
      "target": "sub:DOM-SOC-allocational-harm",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "target": "sub:DOM-SEC-automated-vulnerability-discovery",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-ai-assisted-biological-threat-design",
      "target": "sub:DOM-AGT-tool-misuse-privilege-escalation",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "target": "sub:DOM-ECO-power-data-concentration",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-infrastructure-dependency-collapse",
      "target": "sub:DOM-AGT-multi-agent-coordination-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "target": "sub:DOM-CTL-unsafe-human-in-the-loop-failures",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-lethal-autonomous-weapon-systems",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-strategic-misalignment",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-strategic-misalignment",
      "target": "sub:DOM-CTL-loss-of-human-agency",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "target": "sub:DOM-AGT-goal-drift",
      "relationship": "related_pattern"
    },
    {
      "source": "sub:DOM-SYS-uncontrolled-recursive-self-improvement",
      "target": "sub:DOM-SYS-strategic-misalignment",
      "relationship": "related_pattern"
    },
    {
      "source": "glossary:accountability",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:accountability",
      "target": "glossary:explainability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:accountability",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-attack",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-attack",
      "target": "glossary:automated-vulnerability-discovery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-perturbation",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-perturbation",
      "target": "glossary:evasion-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-perturbation",
      "target": "glossary:adversarial-training",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-perturbation",
      "target": "glossary:robustness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-training",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-training",
      "target": "glossary:adversarial-perturbation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-training",
      "target": "glossary:robustness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:adversarial-training",
      "target": "glossary:red-teaming",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-framework",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-framework",
      "target": "glossary:function-calling",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-framework",
      "target": "glossary:model-context-protocol",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-framework",
      "target": "glossary:multi-agent-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-propagation",
      "target": "glossary:multi-agent-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-propagation",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-propagation",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-safety",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-safety",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-safety",
      "target": "glossary:privilege-escalation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agent-safety",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agentic-ai",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:agentic-ai",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-risk-management-framework",
      "target": "glossary:governance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-risk-management-framework",
      "target": "glossary:owasp-top-10-llm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-risk-management-framework",
      "target": "glossary:mitre-atlas",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-risk-management-framework",
      "target": "glossary:gdpr",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-risk-management-framework",
      "target": "glossary:explainability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-safety",
      "target": "glossary:misalignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-safety",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-safety",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-generated-code",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-generated-code",
      "target": "glossary:malware",
      "relationship": "related_term"
    },
    {
      "source": "glossary:ai-generated-code",
      "target": "glossary:dual-use",
      "relationship": "related_term"
    },
    {
      "source": "glossary:alert-fatigue",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:alert-fatigue",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:alert-fatigue",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-amplification",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-amplification",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-amplification",
      "target": "glossary:representational-harm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-bias",
      "target": "glossary:proxy-variable",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-bias",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-bias",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-trading",
      "target": "glossary:market-manipulation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-trading",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:algorithmic-trading",
      "target": "glossary:black-box-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:alignment",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:alignment",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:alignment",
      "target": "glossary:superintelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:allocational-harm",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:allocational-harm",
      "target": "glossary:proxy-discrimination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:allocational-harm",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:allocational-harm",
      "target": "glossary:proxy-variable",
      "relationship": "related_term"
    },
    {
      "source": "glossary:anonymization",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:anonymization",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:anonymization",
      "target": "glossary:gdpr",
      "relationship": "related_term"
    },
    {
      "source": "glossary:artificial-general-intelligence",
      "target": "glossary:superintelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:artificial-general-intelligence",
      "target": "glossary:existential-risk",
      "relationship": "related_term"
    },
    {
      "source": "glossary:artificial-general-intelligence",
      "target": "glossary:recursive-self-improvement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:artificial-general-intelligence",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:artificial-general-intelligence",
      "target": "glossary:misalignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:attack-surface",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:attack-surface",
      "target": "glossary:least-privilege",
      "relationship": "related_term"
    },
    {
      "source": "glossary:attack-surface",
      "target": "glossary:defense-in-depth",
      "relationship": "related_term"
    },
    {
      "source": "glossary:attribute-inference",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:attribute-inference",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:attribute-inference",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:authority-transfer",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:authority-transfer",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:authority-transfer",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:authority-transfer",
      "target": "glossary:accountability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-decision-making",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-decision-making",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-decision-making",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-exploit",
      "target": "glossary:automated-vulnerability-discovery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-exploit",
      "target": "glossary:cyber-espionage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-exploit",
      "target": "glossary:privilege-escalation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-vulnerability-discovery",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automated-vulnerability-discovery",
      "target": "glossary:cyber-espionage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automation",
      "target": "glossary:job-displacement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automation",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automation",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automation-bias",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automation-bias",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:automation-bias",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomous-vehicle",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomous-vehicle",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomous-weapons",
      "target": "glossary:autonomous-vehicle",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomous-weapons",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomous-weapons",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomy",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomy",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:autonomy",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:backdoor-attack",
      "target": "glossary:data-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:backdoor-attack",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:backdoor-attack",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:behavioral-profiling",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:behavioral-profiling",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:behavioral-profiling",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:behavioral-profiling",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biological-threat",
      "target": "glossary:biosecurity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biological-threat",
      "target": "glossary:dual-use",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biological-threat",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biometric-data",
      "target": "glossary:facial-recognition",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biometric-data",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biometric-data",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biosecurity",
      "target": "glossary:dual-use",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biosecurity",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:biosecurity",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:black-box-system",
      "target": "glossary:explainability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:black-box-system",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:black-box-system",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:business-email-compromise",
      "target": "glossary:phishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:business-email-compromise",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:business-email-compromise",
      "target": "glossary:vishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:c2pa",
      "target": "glossary:content-authenticity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:c2pa",
      "target": "glossary:digital-watermarking",
      "relationship": "related_term"
    },
    {
      "source": "glossary:c2pa",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:c2pa",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:cascading-failure",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:cascading-failure",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:cascading-failure",
      "target": "glossary:multi-agent-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:chain-of-thought",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:chain-of-thought",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:chain-of-thought",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:chain-of-thought",
      "target": "glossary:overreliance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:chatbot",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:chatbot",
      "target": "glossary:ai-safety",
      "relationship": "related_term"
    },
    {
      "source": "glossary:chatbot",
      "target": "glossary:content-moderation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:complacency",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:complacency",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:complacency",
      "target": "glossary:alert-fatigue",
      "relationship": "related_term"
    },
    {
      "source": "glossary:confabulation",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:confabulation",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:confabulation",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:consent",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:consent",
      "target": "glossary:gdpr",
      "relationship": "related_term"
    },
    {
      "source": "glossary:consent",
      "target": "glossary:behavioral-profiling",
      "relationship": "related_term"
    },
    {
      "source": "glossary:consent",
      "target": "glossary:anonymization",
      "relationship": "related_term"
    },
    {
      "source": "glossary:contagion",
      "target": "glossary:agent-propagation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:contagion",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:contagion",
      "target": "glossary:multi-agent-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:content-authenticity",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:content-authenticity",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:content-authenticity",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:content-authenticity",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:content-moderation",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:content-moderation",
      "target": "glossary:ai-safety",
      "relationship": "related_term"
    },
    {
      "source": "glossary:content-moderation",
      "target": "glossary:chatbot",
      "relationship": "related_term"
    },
    {
      "source": "glossary:context-injection",
      "target": "glossary:memory-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:context-injection",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:context-injection",
      "target": "glossary:data-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:context-window",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:context-window",
      "target": "glossary:retrieval-augmented-generation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:context-window",
      "target": "glossary:persistent-memory",
      "relationship": "related_term"
    },
    {
      "source": "glossary:context-window",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:coordinated-inauthentic-behavior",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:coordinated-inauthentic-behavior",
      "target": "glossary:algorithmic-amplification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:coordinated-inauthentic-behavior",
      "target": "glossary:election-interference",
      "relationship": "related_term"
    },
    {
      "source": "glossary:coordination-failure",
      "target": "glossary:multi-agent-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:coordination-failure",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:coordination-failure",
      "target": "glossary:goal-drift",
      "relationship": "related_term"
    },
    {
      "source": "glossary:cyber-espionage",
      "target": "glossary:automated-vulnerability-discovery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:cyber-espionage",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:cyber-espionage",
      "target": "glossary:phishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:dark-pattern",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:dark-pattern",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:dark-pattern",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-bias",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-bias",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-bias",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-bias",
      "target": "glossary:representational-harm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-concentration",
      "target": "glossary:black-box-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-concentration",
      "target": "glossary:market-manipulation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-concentration",
      "target": "glossary:job-displacement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-extraction",
      "target": "glossary:model-inversion",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-extraction",
      "target": "glossary:data-leakage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-extraction",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-extraction",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-leakage",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-leakage",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-leakage",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-poisoning",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-poisoning",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-poisoning",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-protection",
      "target": "glossary:gdpr",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-protection",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:data-protection",
      "target": "glossary:data-leakage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:decision-loop",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:decision-loop",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:decision-loop",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:decision-loop",
      "target": "glossary:black-box-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:deepfake",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:deepfake",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:deepfake",
      "target": "glossary:non-consensual-intimate-imagery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:defense-in-depth",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:defense-in-depth",
      "target": "glossary:input-validation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:defense-in-depth",
      "target": "glossary:least-privilege",
      "relationship": "related_term"
    },
    {
      "source": "glossary:defense-in-depth",
      "target": "glossary:output-sandboxing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:democratic-integrity",
      "target": "glossary:trust-erosion",
      "relationship": "related_term"
    },
    {
      "source": "glossary:democratic-integrity",
      "target": "glossary:election-interference",
      "relationship": "related_term"
    },
    {
      "source": "glossary:democratic-integrity",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:deskilling",
      "target": "glossary:job-displacement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:deskilling",
      "target": "glossary:automation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:deskilling",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:deskilling",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:differential-privacy",
      "target": "glossary:anonymization",
      "relationship": "related_term"
    },
    {
      "source": "glossary:differential-privacy",
      "target": "glossary:pseudonymization",
      "relationship": "related_term"
    },
    {
      "source": "glossary:differential-privacy",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:differential-privacy",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:differential-privacy",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:diffusion-model",
      "target": "glossary:generative-adversarial-network",
      "relationship": "related_term"
    },
    {
      "source": "glossary:diffusion-model",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:diffusion-model",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:diffusion-model",
      "target": "glossary:content-authenticity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:digital-monopoly",
      "target": "glossary:data-concentration",
      "relationship": "related_term"
    },
    {
      "source": "glossary:digital-monopoly",
      "target": "glossary:black-box-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:digital-monopoly",
      "target": "glossary:market-manipulation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:digital-watermarking",
      "target": "glossary:content-authenticity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:digital-watermarking",
      "target": "glossary:c2pa",
      "relationship": "related_term"
    },
    {
      "source": "glossary:digital-watermarking",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:digital-watermarking",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:disinformation",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:disinformation",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:disinformation",
      "target": "glossary:election-interference",
      "relationship": "related_term"
    },
    {
      "source": "glossary:disparate-impact",
      "target": "glossary:allocational-harm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:disparate-impact",
      "target": "glossary:proxy-discrimination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:disparate-impact",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:disparate-impact",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:dual-use",
      "target": "glossary:biosecurity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:dual-use",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:dual-use",
      "target": "glossary:automated-vulnerability-discovery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:elder-fraud",
      "target": "glossary:grandparent-scam",
      "relationship": "related_term"
    },
    {
      "source": "glossary:elder-fraud",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:elder-fraud",
      "target": "glossary:robocall",
      "relationship": "related_term"
    },
    {
      "source": "glossary:elder-fraud",
      "target": "glossary:vishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:election-interference",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:election-interference",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:election-interference",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:emergent-behavior",
      "target": "glossary:multi-agent-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:emergent-behavior",
      "target": "glossary:goal-drift",
      "relationship": "related_term"
    },
    {
      "source": "glossary:emergent-behavior",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:emergent-behavior",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:engagement-optimization",
      "target": "glossary:algorithmic-amplification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:engagement-optimization",
      "target": "glossary:dark-pattern",
      "relationship": "related_term"
    },
    {
      "source": "glossary:engagement-optimization",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:epistemic-crisis",
      "target": "glossary:trust-erosion",
      "relationship": "related_term"
    },
    {
      "source": "glossary:epistemic-crisis",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:epistemic-crisis",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:epistemic-crisis",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:erasure",
      "target": "glossary:representational-harm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:erasure",
      "target": "glossary:data-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:erasure",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:erasure",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:evasion-attack",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:evasion-attack",
      "target": "glossary:malware",
      "relationship": "related_term"
    },
    {
      "source": "glossary:evasion-attack",
      "target": "glossary:automated-exploit",
      "relationship": "related_term"
    },
    {
      "source": "glossary:existential-risk",
      "target": "glossary:artificial-general-intelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:existential-risk",
      "target": "glossary:superintelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:existential-risk",
      "target": "glossary:recursive-self-improvement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:explainability",
      "target": "glossary:black-box-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:explainability",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:explainability",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:explainability",
      "target": "glossary:gdpr",
      "relationship": "related_term"
    },
    {
      "source": "glossary:facial-recognition",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:facial-recognition",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:facial-recognition",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fairness",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fairness",
      "target": "glossary:proxy-variable",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fairness",
      "target": "glossary:explainability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fairness",
      "target": "glossary:proxy-discrimination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:feedback-loop",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:feedback-loop",
      "target": "glossary:decision-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:feedback-loop",
      "target": "glossary:data-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:feedback-loop",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fine-tuning",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fine-tuning",
      "target": "glossary:transfer-learning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fine-tuning",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fine-tuning",
      "target": "glossary:rlhf",
      "relationship": "related_term"
    },
    {
      "source": "glossary:fine-tuning",
      "target": "glossary:data-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:flash-crash",
      "target": "glossary:algorithmic-trading",
      "relationship": "related_term"
    },
    {
      "source": "glossary:flash-crash",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:flash-crash",
      "target": "glossary:multi-agent-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:flash-crash",
      "target": "glossary:coordination-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:foundation-model",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:foundation-model",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:foundation-model",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:foundation-model",
      "target": "glossary:dual-use",
      "relationship": "related_term"
    },
    {
      "source": "glossary:function-calling",
      "target": "glossary:model-context-protocol",
      "relationship": "related_term"
    },
    {
      "source": "glossary:function-calling",
      "target": "glossary:agent-framework",
      "relationship": "related_term"
    },
    {
      "source": "glossary:function-calling",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:function-calling",
      "target": "glossary:privilege-escalation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:gdpr",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:gdpr",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:generative-adversarial-network",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:generative-adversarial-network",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:generative-adversarial-network",
      "target": "glossary:diffusion-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:generative-adversarial-network",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:goal-drift",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:goal-drift",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:goal-drift",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:goodharts-law",
      "target": "glossary:reward-hacking",
      "relationship": "related_term"
    },
    {
      "source": "glossary:goodharts-law",
      "target": "glossary:goal-drift",
      "relationship": "related_term"
    },
    {
      "source": "glossary:goodharts-law",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:governance",
      "target": "glossary:accountability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:governance",
      "target": "glossary:explainability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:governance",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:governance",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:grandparent-scam",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:grandparent-scam",
      "target": "glossary:elder-fraud",
      "relationship": "related_term"
    },
    {
      "source": "glossary:grandparent-scam",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:grandparent-scam",
      "target": "glossary:vishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:guardrail",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:guardrail",
      "target": "glossary:rlhf",
      "relationship": "related_term"
    },
    {
      "source": "glossary:guardrail",
      "target": "glossary:jailbreak-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:hallucination",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:hallucination",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:human-agency",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:human-agency",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:human-agency",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:human-in-the-loop",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:human-in-the-loop",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:human-in-the-loop",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:indirect-prompt-injection",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:indirect-prompt-injection",
      "target": "glossary:retrieval-augmented-generation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:indirect-prompt-injection",
      "target": "glossary:context-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:indirect-prompt-injection",
      "target": "glossary:memory-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-ecosystem",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-ecosystem",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-ecosystem",
      "target": "glossary:trust-erosion",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-ecosystem",
      "target": "glossary:epistemic-crisis",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-integrity",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-integrity",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-integrity",
      "target": "glossary:content-authenticity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:information-integrity",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:infrastructure-dependency",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:infrastructure-dependency",
      "target": "glossary:systemic-risk",
      "relationship": "related_term"
    },
    {
      "source": "glossary:infrastructure-dependency",
      "target": "glossary:black-box-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:input-validation",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:input-validation",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:input-validation",
      "target": "glossary:output-sandboxing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:institutional-trust",
      "target": "glossary:trust-erosion",
      "relationship": "related_term"
    },
    {
      "source": "glossary:institutional-trust",
      "target": "glossary:democratic-integrity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:institutional-trust",
      "target": "glossary:governance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instruction-hierarchy",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instruction-hierarchy",
      "target": "glossary:indirect-prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instruction-hierarchy",
      "target": "glossary:system-prompt",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instruction-hierarchy",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instrumental-convergence",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instrumental-convergence",
      "target": "glossary:misalignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instrumental-convergence",
      "target": "glossary:goal-drift",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instrumental-convergence",
      "target": "glossary:self-replication",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instrumental-convergence",
      "target": "glossary:specification-gaming",
      "relationship": "related_term"
    },
    {
      "source": "glossary:instrumental-convergence",
      "target": "glossary:recursive-self-improvement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:international-humanitarian-law",
      "target": "glossary:autonomous-weapons",
      "relationship": "related_term"
    },
    {
      "source": "glossary:international-humanitarian-law",
      "target": "glossary:accountability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:international-humanitarian-law",
      "target": "glossary:governance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:jailbreak-attack",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:jailbreak-attack",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:jailbreak-attack",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:jailbreak-attack",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:job-displacement",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:job-displacement",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:job-displacement",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:large-language-model",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:large-language-model",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:large-language-model",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:least-privilege",
      "target": "glossary:privilege-escalation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:least-privilege",
      "target": "glossary:attack-surface",
      "relationship": "related_term"
    },
    {
      "source": "glossary:least-privilege",
      "target": "glossary:output-sandboxing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:lethal-autonomous-weapon",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:lethal-autonomous-weapon",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:lethal-autonomous-weapon",
      "target": "glossary:ai-safety",
      "relationship": "related_term"
    },
    {
      "source": "glossary:laws",
      "target": "glossary:autonomous-weapons",
      "relationship": "related_term"
    },
    {
      "source": "glossary:laws",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:laws",
      "target": "glossary:international-humanitarian-law",
      "relationship": "related_term"
    },
    {
      "source": "glossary:laws",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:liars-dividend",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:liars-dividend",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:liars-dividend",
      "target": "glossary:content-authenticity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:liars-dividend",
      "target": "glossary:trust-erosion",
      "relationship": "related_term"
    },
    {
      "source": "glossary:liars-dividend",
      "target": "glossary:epistemic-crisis",
      "relationship": "related_term"
    },
    {
      "source": "glossary:malware",
      "target": "glossary:phishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:malware",
      "target": "glossary:cyber-espionage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:malware",
      "target": "glossary:business-email-compromise",
      "relationship": "related_term"
    },
    {
      "source": "glossary:manipulative-design",
      "target": "glossary:dark-pattern",
      "relationship": "related_term"
    },
    {
      "source": "glossary:manipulative-design",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:manipulative-design",
      "target": "glossary:autonomy",
      "relationship": "related_term"
    },
    {
      "source": "glossary:manipulative-design",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:market-manipulation",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:market-manipulation",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:market-manipulation",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:market-power",
      "target": "glossary:data-concentration",
      "relationship": "related_term"
    },
    {
      "source": "glossary:market-power",
      "target": "glossary:digital-monopoly",
      "relationship": "related_term"
    },
    {
      "source": "glossary:market-power",
      "target": "glossary:market-manipulation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:mass-surveillance",
      "target": "glossary:facial-recognition",
      "relationship": "related_term"
    },
    {
      "source": "glossary:mass-surveillance",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:media-manipulation",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:media-manipulation",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:media-manipulation",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:media-manipulation",
      "target": "glossary:content-authenticity",
      "relationship": "related_term"
    },
    {
      "source": "glossary:membership-inference",
      "target": "glossary:model-inversion",
      "relationship": "related_term"
    },
    {
      "source": "glossary:membership-inference",
      "target": "glossary:data-extraction",
      "relationship": "related_term"
    },
    {
      "source": "glossary:membership-inference",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:membership-inference",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:memory-poisoning",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:memory-poisoning",
      "target": "glossary:data-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:memory-poisoning",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misalignment",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misalignment",
      "target": "glossary:goal-drift",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misalignment",
      "target": "glossary:superintelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misalignment",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misinformation",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misinformation",
      "target": "glossary:hallucination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misinformation",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misinformation",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:misinformation",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:mitre-atlas",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:mitre-atlas",
      "target": "glossary:owasp-top-10-llm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:mitre-atlas",
      "target": "glossary:ai-risk-management-framework",
      "relationship": "related_term"
    },
    {
      "source": "glossary:mitre-atlas",
      "target": "glossary:red-teaming",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-context-protocol",
      "target": "glossary:function-calling",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-context-protocol",
      "target": "glossary:agent-framework",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-context-protocol",
      "target": "glossary:attack-surface",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-context-protocol",
      "target": "glossary:least-privilege",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-inversion",
      "target": "glossary:data-leakage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-inversion",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-inversion",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-provenance",
      "target": "glossary:supply-chain-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:model-provenance",
      "target": "glossary:backdoor-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:multi-agent-system",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:multi-agent-system",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:non-consensual-intimate-imagery",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:non-consensual-intimate-imagery",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:output-sandboxing",
      "target": "glossary:defense-in-depth",
      "relationship": "related_term"
    },
    {
      "source": "glossary:output-sandboxing",
      "target": "glossary:least-privilege",
      "relationship": "related_term"
    },
    {
      "source": "glossary:output-sandboxing",
      "target": "glossary:input-validation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:output-sandboxing",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:overreliance",
      "target": "glossary:automation-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:overreliance",
      "target": "glossary:complacency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:overreliance",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:overreliance",
      "target": "glossary:alert-fatigue",
      "relationship": "related_term"
    },
    {
      "source": "glossary:owasp-top-10-llm",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:owasp-top-10-llm",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:owasp-top-10-llm",
      "target": "glossary:mitre-atlas",
      "relationship": "related_term"
    },
    {
      "source": "glossary:owasp-top-10-llm",
      "target": "glossary:ai-risk-management-framework",
      "relationship": "related_term"
    },
    {
      "source": "glossary:persistent-memory",
      "target": "glossary:memory-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:persistent-memory",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:persistent-memory",
      "target": "glossary:context-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:persuasive-technology",
      "target": "glossary:dark-pattern",
      "relationship": "related_term"
    },
    {
      "source": "glossary:persuasive-technology",
      "target": "glossary:manipulative-design",
      "relationship": "related_term"
    },
    {
      "source": "glossary:persuasive-technology",
      "target": "glossary:engagement-optimization",
      "relationship": "related_term"
    },
    {
      "source": "glossary:persuasive-technology",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:phishing",
      "target": "glossary:vishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:phishing",
      "target": "glossary:smishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:phishing",
      "target": "glossary:business-email-compromise",
      "relationship": "related_term"
    },
    {
      "source": "glossary:polymorphic-malware",
      "target": "glossary:malware",
      "relationship": "related_term"
    },
    {
      "source": "glossary:polymorphic-malware",
      "target": "glossary:ai-generated-code",
      "relationship": "related_term"
    },
    {
      "source": "glossary:polymorphic-malware",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:polymorphic-malware",
      "target": "glossary:evasion-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:price-fixing",
      "target": "glossary:market-manipulation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:price-fixing",
      "target": "glossary:algorithmic-trading",
      "relationship": "related_term"
    },
    {
      "source": "glossary:price-fixing",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:privilege-escalation",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:privilege-escalation",
      "target": "glossary:cyber-espionage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:privilege-escalation",
      "target": "glossary:automated-vulnerability-discovery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:profiling",
      "target": "glossary:behavioral-profiling",
      "relationship": "related_term"
    },
    {
      "source": "glossary:profiling",
      "target": "glossary:attribute-inference",
      "relationship": "related_term"
    },
    {
      "source": "glossary:profiling",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:profiling",
      "target": "glossary:consent",
      "relationship": "related_term"
    },
    {
      "source": "glossary:prompt-injection",
      "target": "glossary:context-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:prompt-injection",
      "target": "glossary:jailbreak-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:prompt-injection",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:prompt-injection",
      "target": "glossary:data-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:propaganda",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:propaganda",
      "target": "glossary:coordinated-inauthentic-behavior",
      "relationship": "related_term"
    },
    {
      "source": "glossary:propaganda",
      "target": "glossary:election-interference",
      "relationship": "related_term"
    },
    {
      "source": "glossary:propaganda",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:protected-characteristics",
      "target": "glossary:proxy-discrimination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:protected-characteristics",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:protected-characteristics",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:protected-characteristics",
      "target": "glossary:disparate-impact",
      "relationship": "related_term"
    },
    {
      "source": "glossary:proxy-discrimination",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:proxy-discrimination",
      "target": "glossary:proxy-variable",
      "relationship": "related_term"
    },
    {
      "source": "glossary:proxy-discrimination",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:proxy-discrimination",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:proxy-variable",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:proxy-variable",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:pseudonymization",
      "target": "glossary:anonymization",
      "relationship": "related_term"
    },
    {
      "source": "glossary:pseudonymization",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:pseudonymization",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:pseudonymization",
      "target": "glossary:gdpr",
      "relationship": "related_term"
    },
    {
      "source": "glossary:re-identification",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:re-identification",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:re-identification",
      "target": "glossary:facial-recognition",
      "relationship": "related_term"
    },
    {
      "source": "glossary:re-identification",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:recommendation-system",
      "target": "glossary:algorithmic-amplification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:recommendation-system",
      "target": "glossary:engagement-optimization",
      "relationship": "related_term"
    },
    {
      "source": "glossary:recommendation-system",
      "target": "glossary:dark-pattern",
      "relationship": "related_term"
    },
    {
      "source": "glossary:recommendation-system",
      "target": "glossary:misinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:recursive-self-improvement",
      "target": "glossary:superintelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:recursive-self-improvement",
      "target": "glossary:existential-risk",
      "relationship": "related_term"
    },
    {
      "source": "glossary:recursive-self-improvement",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:red-teaming",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:red-teaming",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:red-teaming",
      "target": "glossary:jailbreak-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:red-teaming",
      "target": "glossary:robustness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:red-teaming",
      "target": "glossary:agent-safety",
      "relationship": "related_term"
    },
    {
      "source": "glossary:remote-code-execution",
      "target": "glossary:privilege-escalation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:remote-code-execution",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:remote-code-execution",
      "target": "glossary:function-calling",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representation-gap",
      "target": "glossary:data-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representation-gap",
      "target": "glossary:representational-harm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representation-gap",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representation-gap",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representational-harm",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representational-harm",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representational-harm",
      "target": "glossary:fairness",
      "relationship": "related_term"
    },
    {
      "source": "glossary:representational-harm",
      "target": "glossary:proxy-discrimination",
      "relationship": "related_term"
    },
    {
      "source": "glossary:retrieval-augmented-generation",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:retrieval-augmented-generation",
      "target": "glossary:system-prompt",
      "relationship": "related_term"
    },
    {
      "source": "glossary:retrieval-augmented-generation",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:reward-hacking",
      "target": "glossary:goal-drift",
      "relationship": "related_term"
    },
    {
      "source": "glossary:reward-hacking",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:reward-hacking",
      "target": "glossary:misalignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:reward-hacking",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:rlhf",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:rlhf",
      "target": "glossary:jailbreak-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:rlhf",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:robocall",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:robocall",
      "target": "glossary:vishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:robocall",
      "target": "glossary:elder-fraud",
      "relationship": "related_term"
    },
    {
      "source": "glossary:robustness",
      "target": "glossary:adversarial-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:robustness",
      "target": "glossary:evasion-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:robustness",
      "target": "glossary:data-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:robustness",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:safety-critical",
      "target": "glossary:human-in-the-loop",
      "relationship": "related_term"
    },
    {
      "source": "glossary:safety-critical",
      "target": "glossary:autonomous-vehicle",
      "relationship": "related_term"
    },
    {
      "source": "glossary:safety-critical",
      "target": "glossary:autonomous-weapons",
      "relationship": "related_term"
    },
    {
      "source": "glossary:safety-critical",
      "target": "glossary:accountability",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-determination",
      "target": "glossary:human-agency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-determination",
      "target": "glossary:autonomy",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-determination",
      "target": "glossary:consent",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-determination",
      "target": "glossary:authority-transfer",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-replication",
      "target": "glossary:agentic-ai",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-replication",
      "target": "glossary:recursive-self-improvement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-replication",
      "target": "glossary:emergent-behavior",
      "relationship": "related_term"
    },
    {
      "source": "glossary:self-replication",
      "target": "glossary:autonomous-weapons",
      "relationship": "related_term"
    },
    {
      "source": "glossary:sensitive-data",
      "target": "glossary:data-protection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:sensitive-data",
      "target": "glossary:gdpr",
      "relationship": "related_term"
    },
    {
      "source": "glossary:sensitive-data",
      "target": "glossary:consent",
      "relationship": "related_term"
    },
    {
      "source": "glossary:sensitive-data",
      "target": "glossary:attribute-inference",
      "relationship": "related_term"
    },
    {
      "source": "glossary:sensitive-data",
      "target": "glossary:re-identification",
      "relationship": "related_term"
    },
    {
      "source": "glossary:single-point-of-failure",
      "target": "glossary:infrastructure-dependency",
      "relationship": "related_term"
    },
    {
      "source": "glossary:single-point-of-failure",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:single-point-of-failure",
      "target": "glossary:systemic-risk",
      "relationship": "related_term"
    },
    {
      "source": "glossary:smishing",
      "target": "glossary:phishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:smishing",
      "target": "glossary:vishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:social-engineering",
      "target": "glossary:phishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:social-engineering",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:social-engineering",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:social-scoring",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:social-scoring",
      "target": "glossary:behavioral-profiling",
      "relationship": "related_term"
    },
    {
      "source": "glossary:social-scoring",
      "target": "glossary:automated-decision-making",
      "relationship": "related_term"
    },
    {
      "source": "glossary:social-scoring",
      "target": "glossary:consent",
      "relationship": "related_term"
    },
    {
      "source": "glossary:specification-gaming",
      "target": "glossary:reward-hacking",
      "relationship": "related_term"
    },
    {
      "source": "glossary:specification-gaming",
      "target": "glossary:goal-drift",
      "relationship": "related_term"
    },
    {
      "source": "glossary:specification-gaming",
      "target": "glossary:misalignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:specification-gaming",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:specification-gaming",
      "target": "glossary:goodharts-law",
      "relationship": "related_term"
    },
    {
      "source": "glossary:stereotyping",
      "target": "glossary:representational-harm",
      "relationship": "related_term"
    },
    {
      "source": "glossary:stereotyping",
      "target": "glossary:data-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:stereotyping",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:stereotyping",
      "target": "glossary:erasure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:superintelligence",
      "target": "glossary:artificial-general-intelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:superintelligence",
      "target": "glossary:recursive-self-improvement",
      "relationship": "related_term"
    },
    {
      "source": "glossary:superintelligence",
      "target": "glossary:existential-risk",
      "relationship": "related_term"
    },
    {
      "source": "glossary:supply-chain-attack",
      "target": "glossary:backdoor-attack",
      "relationship": "related_term"
    },
    {
      "source": "glossary:supply-chain-attack",
      "target": "glossary:data-poisoning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:supply-chain-attack",
      "target": "glossary:model-provenance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-identity",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-identity",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-identity",
      "target": "glossary:social-engineering",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-identity",
      "target": "glossary:elder-fraud",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-media",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-media",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-media",
      "target": "glossary:non-consensual-intimate-imagery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:synthetic-media",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:system-prompt",
      "target": "glossary:prompt-injection",
      "relationship": "related_term"
    },
    {
      "source": "glossary:system-prompt",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:system-prompt",
      "target": "glossary:guardrail",
      "relationship": "related_term"
    },
    {
      "source": "glossary:systemic-risk",
      "target": "glossary:cascading-failure",
      "relationship": "related_term"
    },
    {
      "source": "glossary:systemic-risk",
      "target": "glossary:alignment",
      "relationship": "related_term"
    },
    {
      "source": "glossary:systemic-risk",
      "target": "glossary:superintelligence",
      "relationship": "related_term"
    },
    {
      "source": "glossary:tracking",
      "target": "glossary:mass-surveillance",
      "relationship": "related_term"
    },
    {
      "source": "glossary:tracking",
      "target": "glossary:behavioral-profiling",
      "relationship": "related_term"
    },
    {
      "source": "glossary:tracking",
      "target": "glossary:biometric-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:tracking",
      "target": "glossary:consent",
      "relationship": "related_term"
    },
    {
      "source": "glossary:training-data",
      "target": "glossary:algorithmic-bias",
      "relationship": "related_term"
    },
    {
      "source": "glossary:training-data",
      "target": "glossary:large-language-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:training-data",
      "target": "glossary:data-leakage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:transfer-learning",
      "target": "glossary:fine-tuning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:transfer-learning",
      "target": "glossary:foundation-model",
      "relationship": "related_term"
    },
    {
      "source": "glossary:transfer-learning",
      "target": "glossary:training-data",
      "relationship": "related_term"
    },
    {
      "source": "glossary:transfer-learning",
      "target": "glossary:adversarial-perturbation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:trust-erosion",
      "target": "glossary:disinformation",
      "relationship": "related_term"
    },
    {
      "source": "glossary:trust-erosion",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:trust-erosion",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vendor-lock-in",
      "target": "glossary:black-box-system",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vendor-lock-in",
      "target": "glossary:data-concentration",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vendor-lock-in",
      "target": "glossary:digital-monopoly",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vendor-lock-in",
      "target": "glossary:market-power",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vishing",
      "target": "glossary:phishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vishing",
      "target": "glossary:smishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vishing",
      "target": "glossary:voice-cloning",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vishing",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "glossary:deepfake",
      "relationship": "related_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "glossary:synthetic-media",
      "relationship": "related_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "glossary:vishing",
      "relationship": "related_term"
    },
    {
      "source": "glossary:voice-cloning",
      "target": "glossary:grandparent-scam",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vulnerability-discovery",
      "target": "glossary:automated-vulnerability-discovery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vulnerability-discovery",
      "target": "glossary:automated-exploit",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vulnerability-discovery",
      "target": "glossary:cyber-espionage",
      "relationship": "related_term"
    },
    {
      "source": "glossary:vulnerability-discovery",
      "target": "glossary:dual-use",
      "relationship": "related_term"
    },
    {
      "source": "glossary:zero-day",
      "target": "glossary:vulnerability-discovery",
      "relationship": "related_term"
    },
    {
      "source": "glossary:zero-day",
      "target": "glossary:automated-exploit",
      "relationship": "related_term"
    },
    {
      "source": "glossary:zero-day",
      "target": "glossary:malware",
      "relationship": "related_term"
    },
    {
      "source": "glossary:zero-day",
      "target": "glossary:cyber-espionage",
      "relationship": "related_term"
    }
  ]
}