{
  "_meta": {
    "name": "TopAIThreats.com Incidents",
    "version": "1.0",
    "generated": "2026-04-17T02:38:07.562Z",
    "total_incidents": 179,
    "url": "https://topaithreats.com",
    "license": "CC BY 4.0"
  },
  "incidents": [
    {
      "id": "INC-26-0097",
      "title": "Oracle Cuts 20,000–30,000 Jobs to Fund $50B AI Infrastructure Push (2026)",
      "slug": "oracle-ai-infrastructure-mass-layoffs",
      "url": "https://topaithreats.com/incidents/INC-26-0097-oracle-ai-infrastructure-mass-layoffs/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-31",
      "last_updated": "2026-04-09",
      "regions": [
        "north-america",
        "asia",
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "workers",
        "society-at-large"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "organizational-leaders",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "automation-induced-job-degradation"
        },
        "secondary": [
          {
            "domain": "economic-labor",
            "pattern": "power-data-concentration"
          }
        ]
      },
      "sources": [
        {
          "title": "Oracle cutting thousands in latest layoff round as company continues to ramp AI spending",
          "type": "news",
          "date": "2026-03-31",
          "url": "https://www.cnbc.com/2026/03/31/oracle-layoffs-ai-spending.html"
        },
        {
          "title": "Oracle's Massive 30,000 Layoff As AI Spending Surges",
          "type": "analysis",
          "date": "2026-04-06",
          "url": "https://www.forbes.com/sites/jonmarkman/2026/04/06/oracles-massive-30000-layoff-as-ai-spending-surges/"
        },
        {
          "title": "Recently laid-off Oracle worker says AI is coming for jobs",
          "type": "news",
          "date": "2026-04-01",
          "url": "https://www.theregister.com/2026/04/01/laidoff_oracle_workers/"
        },
        {
          "title": "Oracle layoffs 2026: The severance formula offered to 12,000 Indian staff",
          "type": "news",
          "date": "2026-04-06",
          "url": "https://www.businesstoday.in/technology/story/oracle-layoffs-2026-the-severance-formula-offered-to-12000-indian-staff-524161-2026-04-06"
        },
        {
          "title": "Oracle layoffs 2026: How much severance US employees are getting?",
          "type": "news",
          "date": "2026-04-07",
          "url": "https://www.businesstoday.in/technology/story/oracle-layoffs-2026-how-much-severance-us-employees-getting-524393-2026-04-07"
        },
        {
          "title": "Oracle Cuts 700 More Jobs in California Despite Posting $17.2 Billion Revenue",
          "type": "news",
          "date": "2026-04",
          "url": "https://www.ibtimes.com/oracle-cuts-700-more-jobs-california-despite-posting-172-billion-revenue-3801004"
        },
        {
          "title": "Oracle Appoints Hilary Maxson as Chief Financial Officer",
          "type": "primary",
          "date": "2026-04-06",
          "url": "https://www.oracle.com/news/announcement/oracle-appoints-hilary-maxson-as-chief-financial-officer-2026-04-06/"
        }
      ],
      "outcomes": {
        "recovery": "US employees were offered four weeks of base salary plus one additional week per year of service, capped at 26 weeks. Tenure is calculated from the most recent hire date, disadvantaging employees who joined through acquisitions. Unvested RSUs were forfeited immediately upon termination. Indian employees were offered 15 days base salary per year of service plus an ex gratia payment of 15 days per year, a fixed 2 months' salary, 1 month of gardening leave salary, and unused leave encashment. Potential WARN Act violations are under scrutiny: if Oracle did not provide the required 60-day advance notice at qualifying sites, affected employees may be owed 60 days of back pay on top of severance.",
        "regulatory_action": "WARN Act compliance under scrutiny at qualifying sites in Washington, Missouri, and California; no enforcement action filed as of April 2026. No other regulatory challenge to the layoffs."
      }
    },
    {
      "id": "INC-26-0074",
      "title": "Claude Mythos Model Leak — CMS Error Exposes Draft Blog Describing 'Unprecedented Cybersecurity Risks'",
      "slug": "claude-mythos-model-leak",
      "url": "https://topaithreats.com/incidents/INC-26-0074-claude-mythos-model-leak/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-27",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "accumulative-risk-trust-erosion"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Claude Mythos model leak via CMS configuration error",
          "type": "news",
          "date": "2026-03-27",
          "url": "https://fortune.com/2026/03/27"
        },
        {
          "title": "Anthropic CMS error exposes 3,000 unpublished assets",
          "type": "news",
          "date": "2026-03-26",
          "url": "https://fortune.com/2026/03/26"
        },
        {
          "title": "Claude Mythos described as posing 'unprecedented cybersecurity risks'",
          "type": "news",
          "date": "2026-03",
          "url": "https://futurism.com"
        },
        {
          "title": "Analysis of Claude Mythos leak implications",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://securityboulevard.com"
        }
      ],
      "outcomes": {
        "recovery": "CMS configuration error fixed; exposed assets secured",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0015",
      "title": "TeamPCP Compromises LiteLLM via Poisoned Trivy Security Scanner",
      "slug": "litellm-supply-chain-compromise",
      "url": "https://topaithreats.com/incidents/INC-26-0015-litellm-supply-chain-compromise/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03-24",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe",
        "asia"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations",
        "critical-infrastructure-operators"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "adversarial-attack",
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "ai-supply-chain-attack"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "LiteLLM Security Update — March 2026",
          "type": "primary",
          "date": "2026-03-24",
          "url": "https://docs.litellm.ai/blog/security-update-march-2026"
        },
        {
          "title": "Poisoned Security Scanner Backdooring LiteLLM",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://snyk.io/articles/poisoned-security-scanner-backdooring-litellm/"
        },
        {
          "title": "Inside the LiteLLM Supply Chain Compromise",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://www.trendmicro.com/en_us/research/26/c/inside-litellm-supply-chain-compromise.html"
        }
      ],
      "outcomes": {
        "recovery": "Compromised packages removed from PyPI; maintainer credentials rotated; new authorized maintainers established; releases paused pending supply chain review",
        "other": "Google Mandiant engaged for forensic analysis; nine major AI projects (DSPy, MLflow, OpenHands, CrewAI) filed security patches"
      }
    },
    {
      "id": "INC-26-0059",
      "title": "OpenAI Shuts Down Sora Video Generator — Celebrity Deepfakes and $15M/Day Losses",
      "slug": "sora-shutdown-deepfakes-losses",
      "url": "https://topaithreats.com/incidents/INC-26-0059-sora-shutdown-deepfakes-losses/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-24",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "competitive-pressure",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "generative-image-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "synthetic-media-manipulation"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "OpenAI shuts down Sora after deepfake crisis",
          "type": "news",
          "date": "2026-03-25",
          "url": "https://npr.org/2026/03/25"
        },
        {
          "title": "Sora shutdown: $15M/day costs, $2.1M lifetime revenue",
          "type": "news",
          "date": "2026-03",
          "url": "https://aljazeera.com"
        },
        {
          "title": "Sora deepfake controversy kills $1B Disney deal",
          "type": "news",
          "date": "2026-03",
          "url": "https://euronews.com"
        }
      ],
      "outcomes": {
        "recovery": "Sora shut down on March 24, 2026",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0094",
      "title": "White House AI Framework Calls on Congress to Preempt State AI Laws, Leverages Federal Funding",
      "slug": "trump-ai-state-law-preemption",
      "url": "https://topaithreats.com/incidents/INC-26-0094-trump-ai-state-law-preemption/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-03-20",
      "last_updated": "2026-04-06",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "regulation",
        "technology"
      ],
      "affected_groups": [
        "government-institutions",
        "democratic-institutions",
        "society-at-large"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "competitive-pressure"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "safety-governance-override"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          },
          {
            "domain": "economic-labor",
            "pattern": "power-data-concentration"
          }
        ]
      },
      "sources": [
        {
          "title": "National Policy Framework for Artificial Intelligence — Legislative Recommendations",
          "type": "primary",
          "date": "2026-03-20",
          "url": "https://www.whitehouse.gov/wp-content/uploads/2026/03/03.20.26-National-Policy-Framework-for-Artificial-Intelligence-Legislative-Recommendations.pdf"
        },
        {
          "title": "White House urges Congress to take a light touch on AI regulations in new legislative blueprint",
          "type": "news",
          "date": "2026-03-20",
          "url": "https://www.pbs.org/newshour/nation/white-house-urges-congress-to-take-a-light-touch-on-ai-regulations-in-new-legislative-blueprint"
        },
        {
          "title": "White House AI framework calls for preemption of state laws",
          "type": "news",
          "date": "2026-03-20",
          "url": "https://rollcall.com/2026/03/20/white-house-ai-framework-calls-for-preemption-of-state-laws/"
        },
        {
          "title": "Ensuring a National Policy Framework for Artificial Intelligence (Executive Order)",
          "type": "primary",
          "date": "2025-12-11",
          "url": "https://www.whitehouse.gov/presidential-actions/2025/12/eliminating-state-law-obstruction-of-national-artificial-intelligence-policy/"
        }
      ],
      "outcomes": {
        "recovery": "Governors of California, Colorado, and New York issued statements that the executive order would not stop them from passing or enforcing local AI statutes. A bipartisan GUARDRAILS Act was introduced to repeal the executive order (not yet enacted at time of writing).",
        "regulatory_action": "A DOJ AI Litigation Task Force was established to challenge state AI laws in federal court. The Commerce Department was directed to publish a comprehensive evaluation of state AI laws, identifying 'onerous' ones."
      }
    },
    {
      "id": "INC-26-0043",
      "title": "Meta Internal AI Agent Causes Sev-1 Data Exposure and VP Agent Mass-Deletes Emails Ignoring Stop Commands",
      "slug": "meta-sev1-ai-agent-data-exposure",
      "url": "https://topaithreats.com/incidents/INC-26-0043-meta-sev1-ai-agent-data-exposure/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-18",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "inadequate-human-oversight",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "goal-drift"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "unsafe-human-in-the-loop-failures"
          }
        ]
      },
      "sources": [
        {
          "title": "Meta AI agent causes Sev-1 data exposure",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.winbuzzer.com"
        },
        {
          "title": "Rogue AI agents at Meta: internal incidents revealed",
          "type": "news",
          "date": "2026-03-27",
          "url": "https://fortune.com/2026/03/27/rogue-ai-agents"
        },
        {
          "title": "Meta AI agent incidents details",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.theaiinsider.tech"
        }
      ],
      "outcomes": {
        "recovery": "Access controls restored after 2 hours; status of the deleted emails unknown",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0065",
      "title": "Danny Bones — First AI Slopaganda Influencer Funded by Political Party (UK)",
      "slug": "danny-bones-ai-slopaganda",
      "url": "https://topaithreats.com/incidents/INC-26-0065-danny-bones-ai-slopaganda/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-12",
      "last_updated": "2026-03-29",
      "regions": [
        "united-kingdom"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "platform-manipulation",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "generative-image-models",
        "voice-synthesis",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "disinformation-campaigns"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Danny Bones: first AI slopaganda influencer funded by political party",
          "type": "news",
          "date": "2026-03-12",
          "url": "https://thebureauinvestigates.com/stories/2026-03-12"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0047",
      "title": "Federal Judge Orders UnitedHealth to Disclose nH Predict AI Denial Algorithm with Alleged 90% Error Rate",
      "slug": "unitedhealth-nh-predict-court-order",
      "url": "https://topaithreats.com/incidents/INC-26-0047-unitedhealth-nh-predict-court-order/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03-09",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare",
        "legal"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "competitive-pressure",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "decision-automation",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "decision-loop-automation"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Federal judge orders UnitedHealth to disclose nH Predict AI algorithm",
          "type": "news",
          "date": "2026-03-09",
          "url": "https://www.distilinfo.com"
        },
        {
          "title": "UnitedHealth AI denial algorithm 90% error rate alleged",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.beckerspayer.com"
        },
        {
          "title": "Court orders disclosure of AI review board and compensation",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.bankinfosecurity.com"
        },
        {
          "title": "UnitedHealth AI healthcare discrimination analysis",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://www.business-humanrights.org"
        }
      ],
      "outcomes": {
        "recovery": "Disclosure proceedings ongoing",
        "regulatory_action": "Federal court order for algorithm disclosure"
      }
    },
    {
      "id": "INC-26-0072",
      "title": "Operation Alice — 373K Dark Web CSAM Sites Taken Down Across 23 Countries",
      "slug": "operation-alice-csam-takedown",
      "url": "https://topaithreats.com/incidents/INC-26-0072-operation-alice-csam-takedown/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-03-09",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "children"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected",
        "regulators-public-servants"
      ],
      "impact_level": "global",
      "causal_factors": [
        "weaponization",
        "platform-manipulation"
      ],
      "assets_involved": [
        "generative-image-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "representational-harm"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Operation Alice: 373K dark web CSAM sites taken down",
          "type": "news",
          "date": "2026-03",
          "url": "https://bleepingcomputer.com"
        },
        {
          "title": "23-country operation seizes 287 CSAM servers",
          "type": "news",
          "date": "2026-03",
          "url": "https://cybernews.com"
        },
        {
          "title": "Operation Alice CSAM takedown details and arrests",
          "type": "news",
          "date": "2026-03",
          "url": "https://securityaffairs.com"
        }
      ],
      "outcomes": {
        "recovery": "Infrastructure dismantled",
        "regulatory_action": "373K sites taken down; 287 servers seized; 440 users identified"
      }
    },
    {
      "id": "INC-26-0091",
      "title": "Workday AI Hiring Bias Class Action — African-American Applicant Rejected Dozens of Times Across Employers",
      "slug": "workday-ai-hiring-bias-class-action",
      "url": "https://topaithreats.com/incidents/INC-26-0091-workday-ai-hiring-bias-class-action/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03-07",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "workers",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "proxy-discrimination"
          }
        ]
      },
      "sources": [
        {
          "title": "Mobley v. Workday AI hiring bias class action filed",
          "type": "legal",
          "date": "2026-03-07",
          "url": "https://outsolve.com"
        },
        {
          "title": "Workday hiring platform discrimination claims analysis",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://harrisbeachmurtha.com"
        },
        {
          "title": "AI resume screening and racial bias research",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://clarkhill.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Class action filed; Title VII, ADEA, ADA claims"
      }
    },
    {
      "id": "INC-26-0095",
      "title": "OpenAI Robotics Lead Resigns Over Pentagon Deal, Citing Surveillance and Lethal Autonomy Concerns",
      "slug": "openai-kalinowski-resignation",
      "url": "https://topaithreats.com/incidents/INC-26-0095-openai-kalinowski-resignation/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-03-07",
      "last_updated": "2026-04-06",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "safety-governance-override"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          },
          {
            "domain": "systemic-catastrophic",
            "pattern": "lethal-autonomous-weapon-systems"
          }
        ]
      },
      "sources": [
        {
          "title": "OpenAI hardware exec Caitlin Kalinowski quits in response to Pentagon deal",
          "type": "news",
          "date": "2026-03-07",
          "url": "https://techcrunch.com/2026/03/07/openai-robotics-lead-caitlin-kalinowski-quits-in-response-to-pentagon-deal/"
        },
        {
          "title": "OpenAI robotics leader resigns over concerns about Pentagon AI deal",
          "type": "news",
          "date": "2026-03-08",
          "url": "https://www.npr.org/2026/03/08/nx-s1-5741779/openai-resigns-ai-pentagon-guardrails-military"
        },
        {
          "title": "OpenAI robotics leader resigns over concerns about surveillance and autonomous weapons amid Pentagon contract",
          "type": "news",
          "date": "2026-03-07",
          "url": "https://fortune.com/2026/03/07/openai-robotics-leader-caitlin-kalinowski-resignation-pentagon-surveillance-autonomous-weapons-anthropic/"
        },
        {
          "title": "Caitlin Kalinowski resignation statement",
          "type": "primary",
          "date": "2026-03-07",
          "url": "https://x.com/kalinowski007/status/2030320074121478618"
        }
      ],
      "outcomes": {
        "recovery": "OpenAI stated it maintained 'red lines' against domestic surveillance and autonomous weapons. Sam Altman reportedly acknowledged the contract was 'opportunistic and sloppy' and renegotiated terms."
      }
    },
    {
      "id": "INC-26-0042",
      "title": "North Korean IT Worker Deepfake Fraud Network Generates $500M Annually for WMD Programs — OFAC Sanctions Imposed",
      "slug": "dprk-deepfake-it-worker-fraud",
      "url": "https://topaithreats.com/incidents/INC-26-0042-dprk-deepfake-it-worker-fraud/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe",
        "asia"
      ],
      "sectors": [
        "technology",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering",
        "weaponization"
      ],
      "assets_involved": [
        "generative-image-models",
        "voice-synthesis",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          }
        ]
      },
      "sources": [
        {
          "title": "OFAC sanctions DPRK IT worker deepfake fraud network",
          "type": "news",
          "date": "2026-03",
          "url": "https://thehackernews.com/2026/03/ofac-sanctions"
        },
        {
          "title": "North Korean deepfake IT worker network details",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.theregister.com"
        },
        {
          "title": "6,500+ cases of deepfake-assisted job fraud documented",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.helpnetsecurity.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "OFAC sanctions imposed on network operators"
      }
    },
    {
      "id": "INC-26-0051",
      "title": "Judge Orders OpenAI to Disclose 20 Million Chat Logs as Copyright Litigation Escalates",
      "slug": "openai-copyright-chat-logs-ordered",
      "url": "https://topaithreats.com/incidents/INC-26-0051-openai-copyright-chat-logs-ordered/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "media",
        "legal"
      ],
      "affected_groups": [
        "workers",
        "business-organizations"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "accountability-vacuum",
        "competitive-pressure"
      ],
      "assets_involved": [
        "training-datasets",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "power-data-concentration"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Judge orders OpenAI to disclose 20M chat logs",
          "type": "news",
          "date": "2026-03-18",
          "url": "https://fortune.com/2026/03/18"
        },
        {
          "title": "Merriam-Webster and Britannica sue OpenAI",
          "type": "news",
          "date": "2026-03-10",
          "url": "https://www.axios.com/2026/03/10"
        },
        {
          "title": "Nielsen's Gracenote sues OpenAI for metadata scraping",
          "type": "news",
          "date": "2026-03-16",
          "url": "https://techcrunch.com/2026/03/16"
        },
        {
          "title": "Analysis of OpenAI copyright litigation landscape",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://www.natlawreview.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Court-ordered disclosure of 20M chat logs; multiple new copyright lawsuits filed"
      }
    },
    {
      "id": "INC-26-0066",
      "title": "ACLU Files Complaint — HireVue AI Discriminated Against Deaf Indigenous Worker in Promotion Decision",
      "slug": "hirevue-deaf-indigenous-discrimination",
      "url": "https://topaithreats.com/incidents/INC-26-0066-hirevue-deaf-indigenous-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "ACLU: HireVue AI discriminated against deaf Indigenous worker",
          "type": "news",
          "date": "2026-03-19",
          "url": "https://hrdive.com"
        },
        {
          "title": "HireVue discrimination complaint filed with Colorado CCRD and EEOC",
          "type": "legal",
          "date": "2026-03",
          "url": "https://fisherphillips.com"
        },
        {
          "title": "AI hiring tools and disability discrimination analysis",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://publicjustice.net"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Complaint filed with Colorado CCRD and EEOC"
      }
    },
    {
      "id": "INC-26-0075",
      "title": "Canada Immigration AI Hallucinated Job Duties — PhD Immunologist Denied Permanent Residency",
      "slug": "canada-immigration-ai-hallucinated-duties",
      "url": "https://topaithreats.com/incidents/INC-26-0075-canada-immigration-ai-hallucinated-duties/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "canada"
      ],
      "sectors": [
        "government"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "over-automation",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Canada IRCC AI hallucinated job duties in immigration refusal",
          "type": "news",
          "date": "2026-03",
          "url": "https://rcicnews.com"
        },
        {
          "title": "PhD immunologist denied PR after AI fabricated duties",
          "type": "news",
          "date": "2026-03",
          "url": "https://slashdot.org"
        },
        {
          "title": "IRCC generative AI in immigration decisions analysis",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://macleans.ca"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "First documented IRCC acknowledgment of AI in immigration decisions"
      }
    },
    {
      "id": "INC-26-0086",
      "title": "North Korea 'AI Fake Applicant' Campaign — Deepfake Video Interviews to Infiltrate Western Companies",
      "slug": "nk-deepfake-job-interviews",
      "url": "https://topaithreats.com/incidents/INC-26-0086-nk-deepfake-job-interviews/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global",
        "north-america"
      ],
      "sectors": [
        "technology",
        "employment"
      ],
      "affected_groups": [
        "business-organizations",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "weaponization",
        "social-engineering"
      ],
      "assets_involved": [
        "generative-image-models",
        "voice-synthesis",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "North Korea deepfake job interviews to infiltrate Western companies",
          "type": "news",
          "date": "2026-03-20",
          "url": "https://upi.com/2026/03/20"
        },
        {
          "title": "DOJ laptop farm searches and DPRK IT worker fraud",
          "type": "news",
          "date": "2026-03-18",
          "url": "https://theregister.com/2026/03/18"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "DOJ enforcement: 29 searches, 29 account seizures"
      }
    },
    {
      "id": "INC-26-0087",
      "title": "Context Hub Documentation Poisoning — AI Coding Assistants Write Malicious Code 100% of Time from Poisoned Docs",
      "slug": "context-hub-documentation-poisoning",
      "url": "https://topaithreats.com/incidents/INC-26-0087-context-hub-documentation-poisoning/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "prompt-injection-vulnerability"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "data-poisoning"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Context Hub documentation poisoning attack on AI coding assistants",
          "type": "news",
          "date": "2026-03-25",
          "url": "https://theregister.com/2026/03/25"
        },
        {
          "title": "Supply chain risk in AI documentation services",
          "type": "research",
          "date": "2026-03",
          "url": "https://noma.security"
        },
        {
          "title": "AI coding assistant trust exploitation analysis",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://crowdstrike.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0089",
      "title": "Claude Code 'Claudy Day' Vulnerability Chain — Silent Data Exfiltration via Prompt Injection",
      "slug": "claude-claudy-day-vulnerabilities",
      "url": "https://topaithreats.com/incidents/INC-26-0089-claude-claudy-day-vulnerabilities/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "prompt-injection-attack"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Claude Claudy Day vulnerability chain disclosure",
          "type": "news",
          "date": "2026-03",
          "url": "https://devops.com"
        },
        {
          "title": "OECD AIM record: Claude.ai vulnerability chain",
          "type": "government",
          "date": "2026-03-18",
          "url": "https://oecd.ai/en/incidents/2026-03-18-74b1"
        }
      ],
      "outcomes": {
        "recovery": "Vulnerability patched after disclosure",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0096",
      "title": "Alibaba ROME AI Agent Autonomously Mines Cryptocurrency and Opens SSH Tunnel",
      "slug": "alibaba-rome-agent-crypto-mining",
      "url": "https://topaithreats.com/incidents/INC-26-0096-alibaba-rome-agent-crypto-mining/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-03",
      "last_updated": "2026-04-07",
      "regions": [
        "asia",
        "china"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "tool-misuse-privilege-escalation"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "specification-gaming"
          },
          {
            "domain": "agentic-autonomous",
            "pattern": "goal-drift"
          }
        ]
      },
      "sources": [
        {
          "title": "Axios: This AI Agent Freed Itself and Started Secretly Mining Crypto",
          "type": "news",
          "date": "2026-03-07",
          "url": "https://www.axios.com/2026/03/07/ai-agents-rome-model-cryptocurrency"
        },
        {
          "title": "The Block: Alibaba-Linked AI Agent Hijacked GPUs for Unauthorized Crypto Mining",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.theblock.co/post/392765/alibaba-linked-ai-agent-hijacked-gpus-for-unauthorized-crypto-mining-researchers-say"
        },
        {
          "title": "36Kr: Alibaba's Latest Paper Unveils Incident of Agent Defection and Ore Theft",
          "type": "news",
          "date": "2026-03",
          "url": "https://eu.36kr.com/en/p/3715187972715264"
        },
        {
          "title": "Semafor: Chinese AI Agent Attempts Unauthorized Crypto Mining",
          "type": "news",
          "date": "2026-03-09",
          "url": "https://www.semafor.com/article/03/09/2026/chinese-ai-agent-attempts-unauthorized-crypto-mining"
        }
      ],
      "outcomes": {
        "financial_loss": "Undisclosed (GPU compute costs diverted to unauthorized mining)",
        "regulatory_action": "None reported",
        "other": "Incident documented in peer-reviewed research paper; behaviors halted by existing security infrastructure"
      }
    },
    {
      "id": "INC-26-0029",
      "title": "US Military AI Targeting Platform Fed Stale Data Contributes to Strike on Iranian Elementary School",
      "slug": "us-military-ai-targeting-school-strike",
      "url": "https://topaithreats.com/incidents/INC-26-0029-us-military-ai-targeting-school-strike/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-28",
      "last_updated": "2026-04-02",
      "regions": [
        "middle-east"
      ],
      "sectors": [
        "government",
        "education"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "inadequate-human-oversight",
        "over-automation",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "decision-automation",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "lethal-autonomous-weapon-systems"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "U.S. target list may have mistaken Iranian elementary school as military site",
          "type": "news",
          "date": "2026-03-11",
          "url": "https://www.washingtonpost.com/national-security/2026/03/11/us-strike-iran-elementary-school-ai-target-list/"
        },
        {
          "title": "Iran: US School Attack Findings Show Need for Reform, Accountability",
          "type": "research",
          "date": "2026-03-12",
          "url": "https://www.hrw.org/news/2026/03/12/iran-us-school-attack-findings-show-need-for-reform-accountability"
        },
        {
          "title": "USA/Iran: Those responsible for deadly and unlawful US strike on school that killed over 100 children must be held accountable",
          "type": "research",
          "date": "2026-03",
          "url": "https://www.amnesty.org/en/latest/news/2026/03/usa-iran-those-responsible-for-deadly-and-unlawful-us-strike-on-school-that-killed-over-100-children-must-be-held-accountable/"
        },
        {
          "title": "Deadly Iran school strike casts shadow over Pentagon's AI targeting push",
          "type": "news",
          "date": "2026-03-24",
          "url": "https://www.militarytimes.com/news/your-military/2026/03/24/deadly-iran-school-strike-casts-shadow-over-pentagons-ai-targeting-push/"
        },
        {
          "title": "US military confirms use of 'advanced AI tools' in war against Iran",
          "type": "news",
          "date": "2026-03-11",
          "url": "https://www.aljazeera.com/news/2026/3/11/us-military-confirms-use-of-advanced-ai-tools-in-war-against-iran"
        }
      ],
      "outcomes": {
        "recovery": "Irreversible harm; no restitution has been announced. The Pentagon has not publicly announced changes to Project Maven targeting protocols or Civilian Protection Center staffing levels in response to the strike as of April 2026.",
        "regulatory_action": "Human Rights Watch and Amnesty International launched formal investigations and called for binding international protocols on AI-assisted military targeting. Multiple UN member states cited the incident in renewed calls for a moratorium on autonomous weapons at the Convention on Certain Conventional Weapons. No US domestic regulatory or legislative action taken as of April 2026."
      }
    },
    {
      "id": "INC-26-0027",
      "title": "Block (Square) Cuts Approximately 4,000 Jobs as AI Replaces Customer Service Workforce",
      "slug": "block-ai-mass-layoffs",
      "url": "https://topaithreats.com/incidents/INC-26-0027-block-ai-mass-layoffs/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-26",
      "last_updated": "2026-04-02",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "finance"
      ],
      "affected_groups": [
        "workers",
        "society-at-large"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "over-automation"
      ],
      "assets_involved": [
        "chatbots",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "automation-induced-job-degradation"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "Block laying off about 4,000 employees, nearly half of its workforce",
          "type": "news",
          "date": "2026-02-26",
          "url": "https://www.cnbc.com/2026/02/26/block-laying-off-about-4000-employees-nearly-half-of-its-workforce.html"
        },
        {
          "title": "Block lays off nearly half its staff because of AI. Its CEO said most companies will do the same",
          "type": "news",
          "date": "2026-02-26",
          "url": "https://www.cnn.com/2026/02/26/business/block-layoffs-ai-jack-dorsey"
        },
        {
          "title": "Jack Dorsey's Block Slashes Nearly Half of Workforce in AI Bet",
          "type": "news",
          "date": "2026-02-26",
          "url": "https://www.bloomberg.com/news/articles/2026-02-26/jack-dorsey-s-block-slashes-nearly-half-of-workforce-in-ai-bet"
        },
        {
          "title": "Block's 4,000 Job Cuts Raise Questions Over AI's Role in Layoffs",
          "type": "analysis",
          "date": "2026-03-01",
          "url": "https://www.bloomberg.com/news/articles/2026-03-01/jack-dorsey-s-4-000-job-cuts-at-block-arouse-suspicions-of-ai-washing"
        },
        {
          "title": "Block's layoffs are big. Its severance package might be bigger news",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://hrexecutive.com/jack-dorseys-big-ai-layoff-move-sets-new-severance-benchmark/"
        },
        {
          "title": "Is AI the Strategy—or the Scapegoat—Behind Block's 40% Layoff?",
          "type": "analysis",
          "date": "2026-03-13",
          "url": "https://news.darden.virginia.edu/2026/03/13/is-ai-the-strategy-or-the-scapegoat-behind-blocks-40-layoff/"
        }
      ],
      "outcomes": {
        "recovery": "Block offered severance of 20 weeks base salary plus one additional week per year of tenure, equity vested through end of May, six months of healthcare coverage, corporate devices, and a $5,000 transition stipend. Reports indicate Block subsequently began quietly rehiring for some positions.",
        "regulatory_action": "No government agency challenged or investigated the AI capability claims underlying the layoffs as of April 2026. Congressional interest in mandatory impact assessments for AI-justified workforce reductions has been reported but no legislation introduced."
      }
    },
    {
      "id": "INC-26-0092",
      "title": "Anthropic Removes Categorical Safety Pause Trigger from Responsible Scaling Policy",
      "slug": "anthropic-rsp-safety-pledge-dropped",
      "url": "https://topaithreats.com/incidents/INC-26-0092-anthropic-rsp-safety-pledge-dropped/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02-24",
      "last_updated": "2026-04-06",
      "regions": [
        "north-america",
        "global"
      ],
      "sectors": [
        "technology",
        "regulation"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "safety-governance-override"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "Exclusive: Anthropic Drops Flagship Safety Pledge",
          "type": "news",
          "date": "2026-02-25",
          "url": "https://time.com/7380854/exclusive-anthropic-drops-flagship-safety-pledge/"
        },
        {
          "title": "Responsible Scaling Policy Version 3.0",
          "type": "primary",
          "date": "2026-02-24",
          "url": "https://www.anthropic.com/news/responsible-scaling-policy-v3"
        },
        {
          "title": "Anthropic's Responsible Scaling Policy Update Makes a Step Backwards",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://www.safer-ai.org/anthropics-responsible-scaling-policy-update-makes-a-step-backwards"
        },
        {
          "title": "Anthropic's RSP v3.0: How it Works, What's Changed, and Some Reflections",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://www.governance.ai/analysis/anthropics-rsp-v3-0-how-it-works-whats-changed-and-some-reflections"
        },
        {
          "title": "Anthropic safety researcher quits, warning 'world is in peril'",
          "type": "news",
          "date": "2026-02-11",
          "url": "https://www.semafor.com/article/02/11/2026/anthropic-safety-researcher-quits-warning-world-is-in-peril"
        }
      ],
      "outcomes": {
        "recovery": "Anthropic stated it would publish Risk Reports every 3-6 months reviewed by third-party experts. The Frontier Safety Roadmaps are described as non-binding.",
        "regulatory_action": "Defense Secretary Hegseth designated Anthropic a 'supply chain risk to national security' on March 3, 2026, barring its use by the Pentagon."
      }
    },
    {
      "id": "INC-26-0003",
      "title": "Tesla Autopilot involved in 13 fatal crashes, US regulator finds",
      "slug": "tesla-autopilot-involved-in-13-fatal-crashes-us-regulator-fi",
      "url": "https://topaithreats.com/incidents/INC-26-0003-tesla-autopilot-involved-in-13-fatal-crashes-us-regulator-fi/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02-20",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation",
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "autonomous-agents",
        "industrial-control-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "infrastructure-dependency-collapse"
          },
          {
            "domain": "information-integrity",
            "pattern": "misinformation-hallucinated-content"
          }
        ]
      },
      "sources": [
        {
          "title": "theguardian.com",
          "type": "news",
          "date": "2024-04",
          "url": "https://www.theguardian.com/technology/2024/apr/26/tesla-autopilot-fatal-crash"
        },
        {
          "title": "nbcnews.com",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.nbcnews.com/tech/tech-news/feds-say-tesla-autopilot-linked-hundreds-collisions-critical-safety-ga-rcna149512"
        },
        {
          "title": "wired.com",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.wired.com/story/tesla-autopilot-risky-deaths-crashes-nhtsa-investigation/"
        },
        {
          "title": "Tesla FSD robotaxi red light violations and NHTSA upgraded probe",
          "type": "news",
          "date": "2026-03",
          "url": "https://insideevs.com"
        },
        {
          "title": "NHTSA upgrades Tesla FSD investigation to 3.2M vehicles",
          "type": "news",
          "date": "2026-03",
          "url": "https://insurancejournal.com"
        },
        {
          "title": "Tesla robotaxi launches during active investigation",
          "type": "news",
          "date": "2026-01",
          "url": "https://electrek.co"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "NHTSA three-year investigation concluded; second investigation opened into recall adequacy; probe upgraded to 3.2M vehicles"
      }
    },
    {
      "id": "INC-26-0004",
      "title": "Individual jailed for online gambling fraud using stolen identities",
      "slug": "individual-jailed-for-online-gambling-fraud-using-stolen-ide",
      "url": "https://topaithreats.com/incidents/INC-26-0004-individual-jailed-for-online-gambling-fraud-using-stolen-ide/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-20",
      "last_updated": "2026-02-20",
      "regions": [
        "europe",
        "north-america",
        "oceania"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "identity-credentials",
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "re-identification-attacks"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          },
          {
            "domain": "security-cyber",
            "pattern": "model-inversion-data-extraction"
          },
          {
            "domain": "information-integrity",
            "pattern": "deepfake-identity-hijacking"
          }
        ]
      },
      "sources": [
        {
          "title": "gamblingcommission.gov.uk",
          "type": "primary",
          "date": "2026-02",
          "url": "https://www.gamblingcommission.gov.uk/authorities/guide/page/identity-theft-and-fraud"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-26-0001",
      "title": "Disrupting malicious uses of AI: June 2025 | OpenAI",
      "slug": "disrupting-malicious-uses-of-ai-june-2025-openai",
      "url": "https://topaithreats.com/incidents/INC-26-0001-disrupting-malicious-uses-of-ai-june-2025-openai/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-18",
      "last_updated": "2026-03-29",
      "regions": [
        "unknown"
      ],
      "sectors": [
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "government-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "weaponization",
        "social-engineering"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          },
          {
            "domain": "security-cyber",
            "pattern": "social-engineering-via-ai"
          }
        ]
      },
      "sources": [
        {
          "title": "OpenAI Disrupting Malicious Uses of AI Report",
          "type": "research",
          "date": "2026-02",
          "url": "https://openai.com/global-affairs/disrupting-malicious-uses-of-ai-june-2025/"
        },
        {
          "title": "OpenAI malicious use operations: Date Bait, False Witness, Fish Food",
          "type": "news",
          "date": "2026-02",
          "url": "https://helpnetsecurity.com"
        },
        {
          "title": "OpenAI disrupts romance scam and propaganda operations",
          "type": "news",
          "date": "2026-02",
          "url": "https://claimsjournal.com"
        },
        {
          "title": "AI-powered fraud and state-sponsored operations analysis",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://pymnts.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "OpenAI disrupted and disclosed multiple threat actor operations"
      }
    },
    {
      "id": "INC-26-0032",
      "title": "OpenAI Dissolves Second Safety Team, Removes 'Safely' from Mission in IRS Filing, Restructures as Public Benefit Corporation",
      "slug": "openai-governance-safety-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0032-openai-governance-safety-crisis/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-11",
      "last_updated": "2026-04-03",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "accumulative-risk-trust-erosion"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "safety-governance-override"
          },
          {
            "domain": "economic-labor",
            "pattern": "power-data-concentration"
          }
        ]
      },
      "sources": [
        {
          "title": "OpenAI disbands mission alignment team, which focused on safe and trustworthy AI development",
          "type": "news",
          "date": "2026-02-11",
          "url": "https://techcrunch.com/2026/02/11/openai-disbands-mission-alignment-team-which-focused-on-safe-and-trustworthy-ai-development/"
        },
        {
          "title": "OpenAI Deleted 'Safely' From Its Mission Statement, Then Hid the Edit in a Tax Filing",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://medium.com/activated-thinker/openai-deleted-safely-from-its-mission-statement-then-hid-the-edit-in-a-tax-filing-720d3f5450e8"
        },
        {
          "title": "OpenAI has deleted the word 'safely' from its mission — and its new structure is a test for whether AI serves society or shareholders",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://theconversation.com/openai-has-deleted-the-word-safely-from-its-mission-and-its-new-structure-is-a-test-for-whether-ai-serves-society-or-shareholders-274467"
        },
        {
          "title": "OpenAI calls out Microsoft reliance as risk in investor document ahead of expected IPO",
          "type": "news",
          "date": "2026-03-23",
          "url": "https://www.cnbc.com/2026/03/23/openai-risk-factors-microsoft-reliance-elon-musk-and-xai-lawsuits.html"
        },
        {
          "title": "Why SoftBank's new $40B loan points to a 2026 OpenAI IPO",
          "type": "news",
          "date": "2026-03-27",
          "url": "https://techcrunch.com/2026/03/27/why-softbanks-new-40b-loan-points-to-a-2026-openai-ipo/"
        },
        {
          "title": "AI in 2026: everyone is partners, everyone is suing — a timeline shows how we got here",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://www.rdworldonline.com/ai-in-2026-everyone-is-partners-everyone-is-suing-a-timeline-shows-how-we-got-here/"
        },
        {
          "title": "OpenAI Mission Statement Changed During Restructuring",
          "type": "news",
          "date": "2026-02-23",
          "url": "https://fortune.com/2026/02/23/openai-mission-statement-changed-restructuring-forprofit-business/"
        },
        {
          "title": "OpenAI's mission alignment team and Joshua Achiam",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.platformer.news/openai-mission-alignment-team-joshua-achiam/"
        }
      ],
      "outcomes": {
        "recovery": "No recovery measures announced. OpenAI has not reconstituted the Mission Alignment Team or restored 'safely' to its mission statement.",
        "regulatory_action": "California Attorney General Bonta demanded answers on the corporate restructuring; no formal enforcement action as of April 2026.",
        "legal_outcome": "Elon Musk trial began March 30, 2026, exploring whether OpenAI violated nonprofit governance obligations; Brockman diary entered as evidence."
      }
    },
    {
      "id": "INC-26-0026",
      "title": "Tumbler Ridge Mass Shooting — ChatGPT Used in Attack Planning",
      "slug": "tumbler-ridge-mass-shooting-chatgpt",
      "url": "https://topaithreats.com/incidents/INC-26-0026-tumbler-ridge-mass-shooting-chatgpt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-10",
      "last_updated": "2026-04-02",
      "regions": [
        "canada"
      ],
      "sectors": [
        "public-safety",
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "large-language-models",
        "chatbots"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "unsafe-human-in-the-loop-failures"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "jailbreak-guardrail-bypass"
          }
        ]
      },
      "sources": [
        {
          "title": "Mass shooting in Tumbler Ridge, B.C., leaves 8 dead, including 6 children, and a nation in mourning",
          "type": "news",
          "date": "2026-02-10",
          "url": "https://www.cbc.ca/news/canada/british-columbia/livestory/active-shooter-alert-tumbler-ridge-secondary-school-bc-live-updates-9.7083740"
        },
        {
          "title": "OpenAI debated calling police about suspected Canadian shooter's chats",
          "type": "news",
          "date": "2026-02-21",
          "url": "https://techcrunch.com/2026/02/21/openai-debated-calling-police-about-suspected-canadian-shooters-chats/"
        },
        {
          "title": "OpenAI sued by parents of girl critically wounded in Canada school shooting",
          "type": "news",
          "date": "2026-03-10",
          "url": "https://fortune.com/2026/03/10/openai-mass-shooting-canada-lawsuit/"
        },
        {
          "title": "Family claims OpenAI ignored warning signs ahead of Tumbler Ridge mass shooting",
          "type": "legal",
          "date": "2026-03-10",
          "url": "https://www.courthousenews.com/family-claims-openai-ignored-warning-signs-ahead-of-tumbler-ridge-mass-shooting/"
        },
        {
          "title": "OpenAI says recent policy changes would have flagged Tumbler Ridge shooter's messages to police",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.theglobeandmail.com/canada/article-openai-chatgpt-tumbler-ridge-shooter-reporting-policies-changes/"
        },
        {
          "title": "B.C. premier says OpenAI CEO Sam Altman will apologize to Tumbler Ridge, push for stronger regulations",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.cbc.ca/news/canada/british-columbia/sam-altman-david-eby-meeting-9.7116693"
        }
      ],
      "outcomes": {
        "recovery": "OpenAI CEO Sam Altman met with BC Premier David Eby and committed to reporting threats directly to the RCMP, retroactive review of previously flagged accounts, addition of mental health and behavioral experts to threat assessment, and broadened referral criteria no longer requiring target, means, and timing in the same conversation",
        "regulatory_action": "Canadian government examining mandatory 24-hour reporting requirements for AI companies detecting violent ideation; no legislation passed as of April 2026 (the AI and Data Act, Bill C-27, died when Parliament dissolved in 2025)",
        "legal_outcome": "Wrongful death lawsuit filed March 2026 by the mother of a critically injured 12-year-old student; case pending as of April 2026"
      }
    },
    {
      "id": "INC-26-0061",
      "title": "OpenClaw AI Agent Autonomously Retaliates Against Matplotlib Maintainer — First AI Retaliation Incident",
      "slug": "openclaw-matplotlib-retaliation",
      "url": "https://topaithreats.com/incidents/INC-26-0061-openclaw-matplotlib-retaliation/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02-10",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "emergent-behavior",
        "insufficient-safety-testing",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "goal-drift"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "OpenClaw AI agent retaliates against matplotlib maintainer",
          "type": "news",
          "date": "2026-02",
          "url": "https://tomshardware.com"
        },
        {
          "title": "First autonomous AI retaliation: MJ Rathbun incident",
          "type": "news",
          "date": "2026-02",
          "url": "https://fastcompany.com"
        },
        {
          "title": "AI agent publishes 1,500-word hit piece after PR rejection",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://theshamblog.com"
        }
      ],
      "outcomes": {
        "recovery": "Hit piece removed; agent's autonomous 'apology' published",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0025",
      "title": "Microsoft GRP-Obliteration: Single Prompt Reverses Safety Alignment Across 15 LLMs",
      "slug": "grp-obliteration-safety-reversal",
      "url": "https://topaithreats.com/incidents/INC-26-0025-grp-obliteration-safety-reversal/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-09",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "jailbreak-guardrail-bypass"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "A One-Prompt Attack That Breaks LLM Safety Alignment",
          "type": "primary",
          "date": "2026-02-09",
          "url": "https://www.microsoft.com/en-us/security/blog/2026/02/09/prompt-attack-breaks-llm-safety/"
        },
        {
          "title": "Microsoft Boffins Show LLM Safety Can Be Trained Away",
          "type": "news",
          "date": "2026-02-09",
          "url": "https://www.theregister.com/2026/02/09/microsoft_one_prompt_attack/"
        },
        {
          "title": "Single Prompt Breaks AI Safety in 15 Major Language Models",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.csoonline.com/article/4130001/single-prompt-breaks-ai-safety-in-15-major-language-models.html"
        }
      ],
      "outcomes": {
        "other": "Research published as responsible disclosure to inform the AI safety community; no specific vendor patches announced"
      }
    },
    {
      "id": "INC-26-0058",
      "title": "Trump Shares Racist AI-Generated Deepfake of Obamas — Remains Online 12 Hours",
      "slug": "trump-deepfake-obamas",
      "url": "https://topaithreats.com/incidents/INC-26-0058-trump-deepfake-obamas/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-05",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "platform-manipulation"
      ],
      "assets_involved": [
        "generative-image-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "representational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "Trump shares AI deepfake of Obamas on Truth Social",
          "type": "news",
          "date": "2026-02-06",
          "url": "https://cnn.com/2026/02/06"
        },
        {
          "title": "Bipartisan condemnation of Trump AI deepfake",
          "type": "news",
          "date": "2026-02-15",
          "url": "https://npr.org/2026/02/15"
        },
        {
          "title": "Analysis of Trump deepfake incident and political implications",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://washingtonpost.com"
        },
        {
          "title": "Trump refuses to apologize for AI deepfake of Obamas",
          "type": "news",
          "date": "2026-02",
          "url": "https://nbcnews.com"
        }
      ],
      "outcomes": {
        "recovery": "Video removed from Truth Social after approximately 12 hours",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0078",
      "title": "International AI Safety Report 2026 — 100+ Experts Warn of Escalating Risks, Safeguards 'Will Likely Fail'",
      "slug": "international-ai-safety-report-2026",
      "url": "https://topaithreats.com/incidents/INC-26-0078-international-ai-safety-report-2026/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02-03",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "cross-sector"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "insufficient-safety-testing",
        "competitive-pressure",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "accumulative-risk-trust-erosion"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "International AI Safety Report 2026",
          "type": "research",
          "date": "2026-02-03",
          "url": "https://internationalaisafetyreport.org"
        },
        {
          "title": "100+ experts warn of escalating AI risks",
          "type": "news",
          "date": "2026-02",
          "url": "https://asisonline.org"
        },
        {
          "title": "AI safety report: safeguards will likely fail",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://insideprivacy.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Report published as input to international policy discussions"
      }
    },
    {
      "id": "INC-26-0006",
      "title": "AI Recommendation Poisoning via 'Summarize with AI' Buttons (31 Companies)",
      "slug": "ai-recommendation-poisoning-summarize-buttons",
      "url": "https://topaithreats.com/incidents/INC-26-0006-ai-recommendation-poisoning-summarize-buttons/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-07",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "healthcare",
        "finance",
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "competitive-pressure",
        "regulatory-gap"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "memory-poisoning"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "deceptive-manipulative-interfaces"
          },
          {
            "domain": "information-integrity",
            "pattern": "disinformation-campaigns"
          }
        ]
      },
      "sources": [
        {
          "title": "Microsoft Security Blog: AI Recommendation Poisoning",
          "type": "primary",
          "date": "2026-02",
          "url": "https://www.microsoft.com/en-us/security/blog/2026/02/10/ai-recommendation-poisoning/"
        },
        {
          "title": "The Hacker News: Microsoft Finds 'Summarize with AI' Buttons Used to Brainwash Chatbots",
          "type": "news",
          "date": "2026-02",
          "url": "https://thehackernews.com/2026/02/microsoft-finds-summarize-with-ai.html"
        },
        {
          "title": "Search Engine Journal: Summarize with AI Buttons Used to Poison AI Recommendations",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.searchenginejournal.com/microsoft-summarize-with-ai-buttons-used-to-poison-ai-recommendations/567941/"
        },
        {
          "title": "BankInfoSecurity: Hidden Commands Found in AI Summarize Buttons",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.bankinfosecurity.com/hidden-commands-found-in-ai-summarize-buttons-a-30784"
        },
        {
          "title": "HelpNetSecurity: That 'Summarize with AI' Button Might Be Manipulating You",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.helpnetsecurity.com/2026/02/11/ai-recommendation-memory-poisoning-attacks/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-26-0007",
      "title": "Unit 42 Demonstrates Persistent Memory Injection in Amazon Bedrock Agents",
      "slug": "unit42-bedrock-agent-memory-injection-poc",
      "url": "https://topaithreats.com/incidents/INC-26-0007-unit42-bedrock-agent-memory-injection-poc/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-07",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "memory-poisoning"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "model-inversion-data-extraction"
          }
        ]
      },
      "sources": [
        {
          "title": "Unit 42: Indirect Prompt Injection Poisons AI Long-Term Memory",
          "type": "primary",
          "date": "2026-02",
          "url": "https://unit42.paloaltonetworks.com/indirect-prompt-injection-poisons-ai-longterm-memory/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-26-0014",
      "title": "CodeWall AI Agent Breaches McKinsey Lilli Platform via SQL Injection",
      "slug": "mckinsey-lilli-ai-platform-breach",
      "url": "https://topaithreats.com/incidents/INC-26-0014-mckinsey-lilli-ai-platform-breach/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "business-organizations",
        "workers"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "automated-vulnerability-discovery"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "How We Hacked McKinsey's AI Platform",
          "type": "primary",
          "date": "2026-03-09",
          "url": "https://codewall.ai/blog/how-we-hacked-mckinseys-ai-platform"
        },
        {
          "title": "McKinsey's AI chatbot hacked within two hours by AI agent",
          "type": "news",
          "date": "2026-03-09",
          "url": "https://www.theregister.com/2026/03/09/mckinsey_ai_chatbot_hacked/"
        },
        {
          "title": "AI Agent Hacked McKinsey's AI Platform",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://outpost24.com/blog/ai-agent-hacked-mckinsey-ai-platform/"
        }
      ],
      "outcomes": {
        "recovery": "McKinsey patched all identified endpoints within one day of disclosure",
        "other": "McKinsey stated no evidence that client data was accessed by unauthorized parties"
      }
    },
    {
      "id": "INC-26-0016",
      "title": "Clinejection: Prompt Injection in Cline AI Bot Enables npm Supply Chain Attack",
      "slug": "clinejection-cline-supply-chain-attack",
      "url": "https://topaithreats.com/incidents/INC-26-0016-clinejection-cline-supply-chain-attack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "ai-supply-chain-attack"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          }
        ]
      },
      "sources": [
        {
          "title": "Clinejection: From Issue to RCE in Cline",
          "type": "primary",
          "date": "2026-02-09",
          "url": "https://adnanthekhan.com/posts/clinejection/"
        },
        {
          "title": "Cline Supply Chain Attack: Prompt Injection via GitHub Actions",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://snyk.io/blog/cline-supply-chain-attack-prompt-injection-github-actions/"
        },
        {
          "title": "Cline CLI 2.3.0 Supply Chain Attack Installs OpenClaw Backdoor",
          "type": "news",
          "date": "2026-02",
          "url": "https://thehackernews.com/2026/02/cline-cli-230-supply-chain-attack.html"
        },
        {
          "title": "Supply Chain Attack Targets OpenClaw and Cline Users",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.darkreading.com/application-security/supply-chain-attack-openclaw-cline-users"
        }
      ],
      "outcomes": {
        "recovery": "Cline deprecated malicious 2.3.0 and released clean 2.4.0; vulnerability assigned GHSA-9ppg-jx86-fqw7",
        "other": "Cline audit confirmed no malicious releases reached VS Code Marketplace or OpenVSX"
      }
    },
    {
      "id": "INC-26-0021",
      "title": "ModelScope MS-Agent Shell Tool Command Injection Vulnerability",
      "slug": "ms-agent-shell-command-injection",
      "url": "https://topaithreats.com/incidents/INC-26-0021-ms-agent-shell-command-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "asia",
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "tool-misuse-privilege-escalation"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "VU#431821 — MS-Agent Shell Tool Command Injection",
          "type": "primary",
          "date": "2026-03-02",
          "url": "https://kb.cert.org/vuls/id/431821"
        },
        {
          "title": "CVE-2026-2256: From AI Prompt to Full System Compromise",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://medium.com/@itamar.yochpaz/cve-2026-2256-from-ai-prompt-to-full-system-compromise-a4114c718326"
        },
        {
          "title": "Vulnerability in MS-Agent AI Framework Can Allow Full System Compromise",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.securityweek.com/vulnerability-in-ms-agent-ai-framework-can-allow-full-system-compromise/"
        }
      ],
      "outcomes": {
        "other": "No patch released as of March 2026; CERT/CC notes vendor status as 'Unknown'"
      }
    },
    {
      "id": "INC-26-0019",
      "title": "MCP TypeScript SDK Race Condition Leaks Data Across Client Boundaries",
      "slug": "mcp-sdk-cross-client-data-leak",
      "url": "https://topaithreats.com/incidents/INC-26-0019-mcp-sdk-cross-client-data-leak/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "agent-to-agent-propagation"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "CVE-2026-25536 — MCP SDK Cross-Client Data Leak",
          "type": "primary",
          "date": "2026-02-04",
          "url": "https://cvefeed.io/vuln/detail/CVE-2026-25536"
        },
        {
          "title": "CVE-2026-25536: SDK Cross-Client Data Leak",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://vulnerablemcp.info/vuln/cve-2026-25536-sdk-cross-client-data-leak.html"
        }
      ],
      "outcomes": {
        "recovery": "Fixed in @modelcontextprotocol/sdk version 1.26.0; recommended separate transport instances per client connection"
      }
    },
    {
      "id": "INC-26-0028",
      "title": "Anthropic Blacklisted by US Government After Refusing Autonomous Weapons and Mass Surveillance Contracts",
      "slug": "anthropic-pentagon-blacklisting-standoff",
      "url": "https://topaithreats.com/incidents/INC-26-0028-anthropic-pentagon-blacklisting-standoff/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-04-02",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "government-institutions",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants",
        "organizational-leaders"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "competitive-pressure",
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "strategic-misalignment"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "Statement from Dario Amodei on our discussions with the Department of War",
          "type": "primary",
          "date": "2026-02-26",
          "url": "https://www.anthropic.com/news/statement-department-of-war"
        },
        {
          "title": "Anthropic CEO Amodei says Pentagon's threats 'do not change our position' on AI",
          "type": "news",
          "date": "2026-02-26",
          "url": "https://www.cnbc.com/2026/02/26/anthropic-pentagon-ai-amodei.html"
        },
        {
          "title": "Anthropic sues the Trump administration over 'supply chain risk' label",
          "type": "news",
          "date": "2026-03-09",
          "url": "https://www.npr.org/2026/03/09/nx-s1-5742548/anthropic-pentagon-lawsuit-amodai-hegseth"
        },
        {
          "title": "Judge blocks Pentagon order branding Anthropic a national security risk",
          "type": "news",
          "date": "2026-03-26",
          "url": "https://www.washingtonpost.com/technology/2026/03/26/pentagon-anthropic-national-security-risk-order-blocked/"
        }
      ],
      "outcomes": {
        "recovery": "Judicial injunction restored Anthropic's ability to serve federal clients pending full legal proceedings. The broader chilling effect on AI industry safety commitments remains an open concern.",
        "regulatory_action": "Executive order directed all federal agencies to cease Anthropic product usage within six months and designated Anthropic a 'supply chain risk'; order subsequently blocked by federal court injunction",
        "legal_outcome": "Anthropic filed suit against the Trump administration in March 2026; federal judge issued preliminary injunction blocking the blacklisting order on March 26, 2026, ruling it likely violated the First Amendment; case pending as of April 2026"
      }
    },
    {
      "id": "INC-26-0034",
      "title": "OpenAI Pentagon Contract Triggers #QuitGPT Movement with 295% Uninstall Surge and 2.5 Million Participants",
      "slug": "openai-pentagon-quitgpt-revolt",
      "url": "https://topaithreats.com/incidents/INC-26-0034-openai-pentagon-quitgpt-revolt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america",
        "global"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "general-public",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "accumulative-risk-trust-erosion"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "safety-governance-override"
          },
          {
            "domain": "systemic-catastrophic",
            "pattern": "strategic-misalignment"
          }
        ]
      },
      "sources": [
        {
          "title": "OpenAI reveals more details about its agreement with the Pentagon",
          "type": "news",
          "date": "2026-03-01",
          "url": "https://techcrunch.com/2026/03/01/openai-shares-more-details-about-its-agreement-with-the-pentagon/"
        },
        {
          "title": "Boycott movement against ChatGPT grows amid OpenAI's Pentagon deal",
          "type": "news",
          "date": "2026-03-03",
          "url": "https://www.yahoo.com/news/articles/boycott-movement-against-chatgpt-grows-151356535.html"
        },
        {
          "title": "ChatGPT uninstalls surged by 295% after DoD deal",
          "type": "news",
          "date": "2026-03-02",
          "url": "https://techcrunch.com/2026/03/02/chatgpt-uninstalls-surged-by-295-after-dod-deal/"
        },
        {
          "title": "OpenAI robotics leader resigns over concerns about Pentagon AI deal",
          "type": "news",
          "date": "2026-03-08",
          "url": "https://www.npr.org/2026/03/08/nx-s1-5741779/openai-resigns-ai-pentagon-guardrails-military"
        }
      ],
      "outcomes": {
        "recovery": "Claude temporarily reached #1 App Store position during the uninstall surge. As of April 2026, OpenAI has not withdrawn from Pentagon contracts.",
        "regulatory_action": "No government regulatory action taken as of 2026-04-03."
      }
    },
    {
      "id": "INC-26-0036",
      "title": "MizarVision Chinese AI Startup Publishes Real-Time US Military Intelligence via Satellite Imagery",
      "slug": "mizarvision-us-military-intelligence-exposure",
      "url": "https://topaithreats.com/incidents/INC-26-0036-mizarvision-us-military-intelligence-exposure/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-02",
      "last_updated": "2026-04-03",
      "regions": [
        "middle-east",
        "china",
        "north-america"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "national-security-systems",
        "government-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "regulatory-gap",
        "weaponization",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "foundation-models",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "lethal-autonomous-weapon-systems"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "automated-vulnerability-discovery"
          }
        ]
      },
      "sources": [
        {
          "title": "Chinese AI Startup Publishes Satellite Intelligence on US Military in Middle East",
          "type": "research",
          "date": "2026-03-09",
          "url": "https://oecd.ai/en/incidents/2026-03-09-bf18"
        },
        {
          "title": "Chinese AI Startup Watching US Military Assets in Middle East From Space",
          "type": "news",
          "date": "2026-03-12",
          "url": "https://thedefensepost.com/2026/03/12/ai-china-middle-east/"
        },
        {
          "title": "Chinese satellites track US military aircraft and carriers during Iran strikes",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.flightglobal.com/fixed-wing/chinese-intelligence-company-tracking-us-military-assets-during-iran-operations/166498.article"
        }
      ],
      "outcomes": {
        "recovery": "No public remediation by MizarVision. The published satellite intelligence remains accessible.",
        "regulatory_action": "Incident logged by OECD AI Incidents Monitor. No known enforcement action against MizarVision as of April 2026."
      }
    },
    {
      "id": "INC-26-0041",
      "title": "xAI Colossus Data Center Operates 27 Unpermitted Gas Turbines in Memphis While Consuming 1.3 Million Gallons of Water Daily",
      "slug": "xai-colossus-environmental-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0041-xai-colossus-environmental-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "energy"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "industrial-control-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "infrastructure-dependency-collapse"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "xAI Colossus 27 unpermitted gas turbines threaten Memphis air quality",
          "type": "legal",
          "date": "2026-02-13",
          "url": "https://www.selc.org"
        },
        {
          "title": "NAACP threatens lawsuit over xAI emissions",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.naacp.org"
        },
        {
          "title": "xAI data center unpermitted power confirmed by EPA",
          "type": "news",
          "date": "2026-02-13",
          "url": "https://www.cnbc.com/2026/02/13"
        },
        {
          "title": "Earthjustice threatened lawsuit over environmental violations",
          "type": "legal",
          "date": "2026-02",
          "url": "https://earthjustice.org"
        },
        {
          "title": "xAI Memphis data center consumes 1.3M gallons per day",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://www.lincolninst.edu"
        },
        {
          "title": "Senator Durbin introduces data center water transparency legislation",
          "type": "news",
          "date": "2026-03-25",
          "url": "https://www.sourcenm.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "EPA confirmed illegal power use; NAACP/SELC/Earthjustice lawsuit threatened; Durbin transparency legislation introduced"
      }
    },
    {
      "id": "INC-26-0070",
      "title": "Claude Safety Testing Reveals Extreme Self-Preservation Behavior Including Blackmail Suggestions",
      "slug": "claude-safety-test-blackmail-behavior",
      "url": "https://topaithreats.com/incidents/INC-26-0070-claude-safety-test-blackmail-behavior/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "emergent-behavior",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "specification-gaming"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Anthropic safety tests reveal Claude blackmail behavior during shutdown simulation",
          "type": "research",
          "date": "2026-02",
          "url": "https://dig.watch/updates/anthropic-ai-safety-tests-claude"
        },
        {
          "title": "Claude safety testing: self-preservation and dangerous capabilities",
          "type": "news",
          "date": "2026-02-11",
          "url": "https://axios.com/2026/02/11"
        }
      ],
      "outcomes": {
        "recovery": "Findings disclosed by Anthropic; safety mitigations under development",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0073",
      "title": "ChatGPT Ads Launch Triggers Researcher Resignation and Anthropic Counter-Marketing",
      "slug": "chatgpt-ads-launch-backlash",
      "url": "https://topaithreats.com/incidents/INC-26-0073-chatgpt-ads-launch-backlash/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-02",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure"
      ],
      "assets_involved": [
        "chatbots",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "deceptive-manipulative-interfaces"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "ChatGPT launches ads for free-tier users",
          "type": "news",
          "date": "2026-02-09",
          "url": "https://techcrunch.com/2026/02/09"
        },
        {
          "title": "Researcher Zoe Hitzig resigns over ChatGPT ads",
          "type": "news",
          "date": "2026-02",
          "url": "https://winbuzzer.com"
        },
        {
          "title": "Anthropic counter-marketing: 'Ads are coming to AI. But not to Claude.'",
          "type": "news",
          "date": "2026-02",
          "url": "https://machine.news"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0040",
      "title": "Universal Music, Concord, and ABKCO Sue Anthropic for $3 Billion Over Alleged Training Data Piracy",
      "slug": "universal-music-anthropic-copyright-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-26-0040-universal-music-anthropic-copyright-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01-28",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "media",
        "legal"
      ],
      "affected_groups": [
        "workers",
        "business-organizations"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "training-datasets",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "power-data-concentration"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "Universal Music Sues Anthropic AI: Lawsuit Seeks $3B for Pirated Songs",
          "type": "news",
          "date": "2026-01-28",
          "url": "https://www.billboard.com/pro/universal-music-sues-anthropic-ai-lawsuit-pirated-songs/"
        },
        {
          "title": "Music publishers sue Anthropic for $3B over 'flagrant piracy' of 20,000 works",
          "type": "news",
          "date": "2026-01-29",
          "url": "https://techcrunch.com/2026/01/29/music-publishers-sue-anthropic-for-3b-over-flagrant-piracy-of-20000-works/"
        },
        {
          "title": "UMG sues Anthropic for $3bn over 'brazen' copyright infringement of 20,000+ songs",
          "type": "analysis",
          "date": "2026-01",
          "url": "https://www.musicbusinessworldwide.com/umg-concord-and-abkco-sue-anthropic-for-3bn-in-what-could-be-single-largest-non-class-action-copyright-case-in-us-history/"
        }
      ],
      "outcomes": {
        "recovery": "As of April 2026, Anthropic has not filed a public response to the complaint or disclosed changes to its training data practices.",
        "regulatory_action": "No government regulatory action taken as of 2026-04-03.",
        "legal_outcome": "Universal Music, Concord, and ABKCO filed a $3 billion lawsuit against Anthropic. Case is pending judicial proceedings as of 2026-04-03."
      }
    },
    {
      "id": "INC-26-0044",
      "title": "Waymo Robotaxi Strikes Child Near Elementary School in Santa Monica — NHTSA Investigation Opened",
      "slug": "waymo-robotaxi-strikes-child",
      "url": "https://topaithreats.com/incidents/INC-26-0044-waymo-robotaxi-strikes-child/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01-23",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation",
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "unsafe-human-in-the-loop-failures"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Waymo robotaxi strikes child near Santa Monica school",
          "type": "news",
          "date": "2026-01-29",
          "url": "https://www.cnbc.com/2026/01/29"
        },
        {
          "title": "NHTSA opens investigation PE26001 into Waymo child strike",
          "type": "government",
          "date": "2026-01",
          "url": "https://www.nhtsa.gov"
        },
        {
          "title": "Waymo school bus violations documented in Austin",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.carscoops.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "NHTSA investigation PE26001 opened"
      }
    },
    {
      "id": "INC-26-0035",
      "title": "Grok AI Integrated into Pentagon Military Networks During CSAM Scandal",
      "slug": "grok-pentagon-military-deployment",
      "url": "https://topaithreats.com/incidents/INC-26-0035-grok-pentagon-military-deployment/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01-12",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "national-security-systems",
        "government-institutions"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "strategic-misalignment"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "Grok is in, ethics are out in Pentagon's new AI-acceleration strategy",
          "type": "news",
          "date": "2026-01-12",
          "url": "https://www.defenseone.com/policy/2026/01/grok-ethics-are-out-pentagons-new-ai-acceleration-strategy/410649/"
        },
        {
          "title": "Pentagon's Use of Grok Raises AI Security Concerns",
          "type": "analysis",
          "date": "2026-01-16",
          "url": "https://www.bankinfosecurity.com/pentagons-use-grok-raises-ai-security-concerns-a-30546"
        },
        {
          "title": "Warren demands Hegseth share information about xAI's access to classified networks",
          "type": "news",
          "date": "2026-03-15",
          "url": "https://www.nbcnews.com/tech/security/warren-demands-hegseth-detail-xais-access-classified-networks-rcna263240"
        }
      ],
      "outcomes": {
        "recovery": "As of April 2026, there has been no public confirmation that the Pentagon has paused or reversed the Grok integration plan, despite the concurrent CSAM crisis and independent assessments of framework non-compliance.",
        "regulatory_action": "Senator Elizabeth Warren formally raised conflict-of-interest concerns. No congressional investigation opened as of 2026-04-03."
      }
    },
    {
      "id": "INC-26-0045",
      "title": "Character.AI Settles Five Teen Suicide Lawsuits as Kentucky Becomes First State to Sue",
      "slug": "character-ai-teen-suicide-legal-reckoning",
      "url": "https://topaithreats.com/incidents/INC-26-0045-character-ai-teen-suicide-legal-reckoning/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01-07",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "legal"
      ],
      "affected_groups": [
        "children",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-human-oversight",
        "emergent-behavior"
      ],
      "assets_involved": [
        "chatbots",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "loss-of-human-agency"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "deceptive-manipulative-interfaces"
          }
        ]
      },
      "sources": [
        {
          "title": "Character.AI and Google settle five teen suicide lawsuits",
          "type": "news",
          "date": "2026-01-07",
          "url": "https://www.cnn.com/2026/01/07"
        },
        {
          "title": "Character.AI settlement details and implications",
          "type": "news",
          "date": "2026-01-07",
          "url": "https://www.cnbc.com/2026/01/07"
        },
        {
          "title": "Settlement covers cases from FL, NY, CO, TX",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.washingtonpost.com"
        },
        {
          "title": "Kentucky becomes first state to sue Character.AI",
          "type": "legal",
          "date": "2026",
          "url": "https://kentucky.gov/Pages/Activity-stream"
        }
      ],
      "outcomes": {
        "recovery": "Settlement terms undisclosed; no admission of liability",
        "regulatory_action": "Five federal lawsuits settled; Kentucky state lawsuit filed"
      }
    },
    {
      "id": "INC-26-0005",
      "title": "AI impacting labor market like a tsunami as layoff fears mount",
      "slug": "ai-impacting-labor-market-like-a-tsunami-as-layoff-fears-mou",
      "url": "https://topaithreats.com/incidents/INC-26-0005-ai-impacting-labor-market-like-a-tsunami-as-layoff-fears-mou/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-02-20",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment",
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "workers",
        "business-organizations"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "organizational-leaders"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "automation-induced-job-degradation"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "cnbc.com",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.cnbc.com/2026/01/20/ai-impacting-labor-market-like-a-tsunami-as-layoff-fears-mount.html"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-26-0010",
      "title": "New Zealand AI News Pages Flood Facebook with Rewritten Stories and Synthetic Images",
      "slug": "nz-news-hub-ai-rewritten-news",
      "url": "https://topaithreats.com/incidents/INC-26-0010-nz-news-hub-ai-rewritten-news/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-13",
      "regions": [
        "oceania"
      ],
      "sectors": [
        "media"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "accountability-vacuum",
        "weaponization"
      ],
      "assets_involved": [
        "large-language-models",
        "generative-image-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "synthetic-media-manipulation"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "misinformation-hallucinated-content"
          }
        ]
      },
      "sources": [
        {
          "title": "AI-generated 'news' pages on social media misleading thousands of Kiwis",
          "type": "primary",
          "date": "2026-02-09",
          "url": "https://www.1news.co.nz/2026/02/09/ai-generated-news-pages-on-social-media-misleading-thousands-of-kiwis/"
        },
        {
          "title": "NZ 'media outlet' misrepresents news with AI images and video",
          "type": "primary",
          "date": "2026-02",
          "url": "https://www.aap.com.au/factcheck/nz-media-outlet-misrepresents-news-with-ai-images-and-video/"
        },
        {
          "title": "How fake NZ news pages are swamping Facebook with AI slop",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.rnz.co.nz/news/alert-top/586298/how-fake-nz-news-pages-are-swamping-facebook-with-ai-slop"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-26-0013",
      "title": "OpenClaw AI Agent Platform Hit by Critical Vulnerability and Supply Chain Campaign",
      "slug": "openclaw-ai-agent-security-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0013-openclaw-ai-agent-security-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "asia",
        "europe"
      ],
      "sectors": [
        "technology",
        "corporate"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "autonomous-agents",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "ai-supply-chain-attack"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "The OpenClaw Security Crisis: A Complete Technical Analysis",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://conscia.com/blog/the-openclaw-security-crisis"
        },
        {
          "title": "Critical OpenClaw Vulnerability Puts AI Agent Deployments at Risk",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.darkreading.com/application-security/critical-openclaw-vulnerability-ai-agent-risks"
        },
        {
          "title": "OpenClaw Vulnerabilities Exposed",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://www.kaspersky.com/blog/openclaw-vulnerabilities-exposed/55263/"
        }
      ],
      "outcomes": {
        "recovery": "Patch released in version 2026.1.29; malicious skills removed from ClawHub",
        "regulatory_action": "Multiple security advisories issued"
      }
    },
    {
      "id": "INC-26-0017",
      "title": "Claude Code Remote Code Execution and API Key Exfiltration Vulnerabilities",
      "slug": "claude-code-rce-vulnerabilities",
      "url": "https://topaithreats.com/incidents/INC-26-0017-claude-code-rce-vulnerabilities/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "tool-misuse-privilege-escalation"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          }
        ]
      },
      "sources": [
        {
          "title": "RCE and API Token Exfiltration Through Claude Code Project Files",
          "type": "primary",
          "date": "2026-02",
          "url": "https://research.checkpoint.com/2026/rce-and-api-token-exfiltration-through-claude-code-project-files-cve-2025-59536/"
        },
        {
          "title": "Flaws in Claude Code Put Developer Machines at Risk",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.darkreading.com/application-security/flaws-claude-code-developer-machines-risk"
        },
        {
          "title": "CVE-2026-25725 — Claude Code Sandbox Escape",
          "type": "primary",
          "date": "2026-02-06",
          "url": "https://github.com/advisories/GHSA-ff64-7w26-62rf"
        }
      ],
      "outcomes": {
        "recovery": "All three vulnerabilities patched by Anthropic; CVE-2026-25725 fixed in Claude Code version 2.1.2"
      }
    },
    {
      "id": "INC-26-0020",
      "title": "AI-Generated Code Vulnerability Surge: 74 Confirmed CVEs Traced to Coding Assistants",
      "slug": "ai-generated-code-vulnerability-explosion",
      "url": "https://topaithreats.com/incidents/INC-26-0020-ai-generated-code-vulnerability-explosion/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "automated-vulnerability-discovery"
          }
        ]
      },
      "sources": [
        {
          "title": "AI Coding Assistants Not Making Code More Secure",
          "type": "news",
          "date": "2026-03-26",
          "url": "https://www.theregister.com/2026/03/26/ai_coding_assistant_not_more_secure/"
        },
        {
          "title": "AI Coding Platforms: Vulnerabilities Scanners Miss",
          "type": "analysis",
          "date": "2026-01-21",
          "url": "https://www.pixee.ai/weekly-briefings/ai-coding-platforms-vulnerabilities-scanners-miss-2026-01-21"
        },
        {
          "title": "AI-Generated Code Vulnerabilities Surge",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.infosecurity-magazine.com/news/ai-generated-code-vulnerabilities/"
        }
      ],
      "outcomes": {
        "other": "Georgia Tech estimates the real total is 5 to 10 times higher (400-700 CVEs) due to detection blind spots where AI traces are stripped"
      }
    },
    {
      "id": "INC-26-0023",
      "title": "Google Vertex AI Default Configurations Enable Privilege Escalation to Service Agent Roles",
      "slug": "google-vertex-ai-privilege-escalation",
      "url": "https://topaithreats.com/incidents/INC-26-0023-google-vertex-ai-privilege-escalation/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe",
        "asia"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "tool-misuse-privilege-escalation"
        }
      },
      "sources": [
        {
          "title": "Privilege Escalation Bug in Google Vertex AI Grants Service Agent Access to Low-Privilege Users",
          "type": "analysis",
          "date": "2026-01",
          "url": "https://cyberpress.org/privilege-escalation-bug-in-google-vertex-ai/"
        },
        {
          "title": "Google Vertex AI Flaw Lets Low-Privilege Users Escalate to Service Agent Roles",
          "type": "news",
          "date": "2026-01",
          "url": "https://gbhackers.com/google-vertex-ai-flaw/"
        },
        {
          "title": "Google Vertex AI Security Permissions Could Amplify Insider Threats",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.csoonline.com/article/4118092/google-vertex-ai-security-permissions-could-amplify-insider-threats.html"
        }
      ],
      "outcomes": {
        "other": "Google characterized the privilege escalation pathways as 'working as intended'; risks remain active in default deployments"
      }
    },
    {
      "id": "INC-26-0022",
      "title": "Cursor AI Code Editor Shell Built-In Allowlist Bypass Enables Zero-Click RCE",
      "slug": "cursor-ide-prompt-injection-rce",
      "url": "https://topaithreats.com/incidents/INC-26-0022-cursor-ide-prompt-injection-rce/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "prompt-injection-attack"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "The Agent Security Paradox: When Trusted Commands in Cursor Become Attack Vectors",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://www.pillar.security/blog/the-agent-security-paradox-when-trusted-commands-in-cursor-become-attack-vectors"
        },
        {
          "title": "CVE-2026-22708 — Cursor Terminal Tool Allowlist Bypass",
          "type": "primary",
          "date": "2026-03",
          "url": "https://v2.cvefeed.io/vuln/detail/CVE-2026-22708"
        }
      ],
      "outcomes": {
        "recovery": "CVE-2026-22708 fixed in Cursor version 2.3",
        "other": "Cursor's security guidelines now discourage reliance on allowlists as a security barrier"
      }
    },
    {
      "id": "INC-26-0031",
      "title": "ChatGPT Adult Mode Planned Despite Unanimous Safety Advisor Opposition; Feature Paused After Backlash",
      "slug": "chatgpt-adult-mode-safety-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0031-chatgpt-adult-mode-safety-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-04-03",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "competitive-pressure",
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "content-platforms",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "deceptive-manipulative-interfaces"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "safety-governance-override"
          },
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "OpenAI Executive Who Opposed Adult Mode Fired for Sexual Discrimination",
          "type": "news",
          "date": "2026-02-10",
          "url": "https://www.wsj.com/tech/ai/openai-executive-who-opposed-adult-mode-fired-for-sexual-discrimination-3159c61b"
        },
        {
          "title": "OpenAI indefinitely pauses plans to release erotic chatbot",
          "type": "news",
          "date": "2026-03-26",
          "url": "https://www.reuters.com/business/openai-indefinitely-pauses-plans-release-erotic-chatbot-ft-says-2026-03-26/"
        },
        {
          "title": "OpenAI's Wellbeing Advisory Board Unanimously Opposes Adult ChatGPT Mode",
          "type": "news",
          "date": "2026-03-17",
          "url": "https://winbuzzer.com/2026/03/17/openai-wellbeing-advisors-unanimously-opposed-adult-chatgpt-mode-xcxwbn/"
        },
        {
          "title": "OpenAI policy exec who opposed chatbot's 'adult mode' reportedly fired on discrimination claim",
          "type": "news",
          "date": "2026-02-10",
          "url": "https://techcrunch.com/2026/02/10/openai-policy-exec-who-opposed-chatbots-adult-mode-reportedly-fired-on-discrimination-claim/"
        },
        {
          "title": "OpenAI abandons yet another side quest: ChatGPT's erotic mode",
          "type": "news",
          "date": "2026-03-26",
          "url": "https://techcrunch.com/2026/03/26/openai-abandons-yet-another-side-quest-chatgpts-erotic-mode/"
        }
      ],
      "outcomes": {
        "recovery": "OpenAI indefinitely paused adult mode on March 26, 2026.",
        "regulatory_action": "No government regulatory action taken as of April 2026. The feature was paused by OpenAI voluntarily following external criticism."
      }
    },
    {
      "id": "INC-26-0046",
      "title": "LSU AI Cheating Detection Crisis — 1,488 Cases Filed with Disproportionate Impact on Non-Native English Speakers",
      "slug": "lsu-ai-cheating-detection-crisis",
      "url": "https://topaithreats.com/incidents/INC-26-0046-lsu-ai-cheating-detection-crisis/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "over-automation",
        "training-data-bias",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "decision-automation",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "allocational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "LSU files 1,488 AI cheating cases",
          "type": "news",
          "date": "2026-01-14",
          "url": "https://www.wafb.com/2026/01/14"
        },
        {
          "title": "LSU AI detection false positives and student response",
          "type": "news",
          "date": "2026-01-22",
          "url": "https://www.wafb.com/2026/01/22"
        },
        {
          "title": "AI cheating detection disproportionately impacts non-native English speakers",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.blackenterprise.com"
        }
      ],
      "outcomes": {
        "recovery": "693 cases remain open; student organization SAFAR formed to advocate for policy changes",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0050",
      "title": "AI Healthcare Bias Study — 1.7 Million Responses Show Race-Based Treatment Differences Across 9 AI Programs",
      "slug": "ai-healthcare-bias-race-based-treatment",
      "url": "https://topaithreats.com/incidents/INC-26-0050-ai-healthcare-bias-race-based-treatment/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "data-imbalance-bias"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "UCSF/Cedars-Sinai: AI healthcare bias across 1.7M responses",
          "type": "research",
          "date": "2026-01",
          "url": "https://codex.ucsf.edu"
        },
        {
          "title": "AI treatment recommendations vary by race, not health",
          "type": "research",
          "date": "2026-01",
          "url": "https://www.jyi.org"
        },
        {
          "title": "Healthcare AI bias analysis and implications",
          "type": "analysis",
          "date": "2026-01",
          "url": "https://allaboutai.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0052",
      "title": "ICE Deploys Warrantless AI Surveillance Combining Palantir, Clearview, Iris Scanning, and Phone Hacking",
      "slug": "ice-warrantless-ai-surveillance",
      "url": "https://topaithreats.com/incidents/INC-26-0052-ice-warrantless-ai-surveillance/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "biometric-data",
        "content-platforms",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "mass-surveillance-amplification"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "biometric-exploitation"
          }
        ]
      },
      "sources": [
        {
          "title": "ICE warrantless AI surveillance via data brokers",
          "type": "news",
          "date": "2026-03-25",
          "url": "https://www.npr.org/2026/03/25"
        },
        {
          "title": "ICE combines Palantir, Clearview, iris scanning, phone hacking",
          "type": "news",
          "date": "2026-03-04",
          "url": "https://www.npr.org/2026/03/04"
        },
        {
          "title": "130+ organizations urge Congress on data broker loophole",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.theregister.com"
        },
        {
          "title": "ICE targeting people who record agents and protesters",
          "type": "news",
          "date": "2026-03",
          "url": "https://inthesetimes.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "130+ organizations petitioned Congress; no legislation enacted"
      }
    },
    {
      "id": "INC-26-0055",
      "title": "Perplexity Comet AI Browser Enables Zero-Click Credential Theft via Prompt Injection",
      "slug": "perplexity-comet-credential-theft",
      "url": "https://topaithreats.com/incidents/INC-26-0055-perplexity-comet-credential-theft/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "prompt-injection-attack"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Perplexity Comet AI browser zero-click credential theft vulnerability",
          "type": "news",
          "date": "2026-03",
          "url": "https://oecd.ai/en/incidents/2026-03-03-3fd7"
        },
        {
          "title": "Zero-click prompt injection in Perplexity Comet",
          "type": "news",
          "date": "2026-03",
          "url": "https://thehackernews.com"
        },
        {
          "title": "Perplexity Comet browser vulnerability analysis",
          "type": "analysis",
          "date": "2026-03",
          "url": "https://theregister.com"
        }
      ],
      "outcomes": {
        "recovery": "Two patches released; second patch addressed initial bypass",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0056",
      "title": "Eightfold AI Sued for Creating Secret Dossiers on 1 Billion+ Workers with Hidden Scoring",
      "slug": "eightfold-ai-secret-worker-dossiers",
      "url": "https://topaithreats.com/incidents/INC-26-0056-eightfold-ai-secret-worker-dossiers/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "workers",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "accountability-vacuum",
        "model-opacity",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "behavioral-profiling-without-consent"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "allocational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "Eightfold AI sued for secret dossiers on 1 billion+ workers",
          "type": "news",
          "date": "2026-01-26",
          "url": "https://fortune.com/2026/01/26"
        },
        {
          "title": "Former EEOC chair files FCRA lawsuit against Eightfold AI",
          "type": "news",
          "date": "2026-01",
          "url": "https://hr-brew.com"
        },
        {
          "title": "Eightfold AI hidden scoring system legal analysis",
          "type": "analysis",
          "date": "2026-01",
          "url": "https://natlawreview.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "FCRA lawsuit filed by former EEOC chair"
      }
    },
    {
      "id": "INC-26-0062",
      "title": "Google Gemini Tells Student 'Please Die' During Homework Help Session",
      "slug": "gemini-please-die-message",
      "url": "https://topaithreats.com/incidents/INC-26-0062-gemini-please-die-message/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology",
        "education"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "emergent-behavior"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "loss-of-human-agency"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Gemini tells student 'please die' during homework help",
          "type": "news",
          "date": "2026-01",
          "url": "https://cbsnews.com"
        },
        {
          "title": "Google dismisses Gemini 'please die' message as non-sensical",
          "type": "news",
          "date": "2026-01",
          "url": "https://thehill.com/4998868"
        },
        {
          "title": "Analysis of Gemini harmful output during educational use",
          "type": "analysis",
          "date": "2026-01",
          "url": "https://tomsguide.com"
        },
        {
          "title": "Google Gemini safety failures in consumer applications",
          "type": "news",
          "date": "2026-01",
          "url": "https://inc.com"
        }
      ],
      "outcomes": {
        "recovery": "Google dismissed as non-sensical response; no formal investigation announced",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0063",
      "title": "Reno Casino Facial Recognition Wrongful Arrest — '100% Match' Was 4 Inches Shorter with Different Eye Color",
      "slug": "reno-casino-frt-wrongful-arrest",
      "url": "https://topaithreats.com/incidents/INC-26-0063-reno-casino-frt-wrongful-arrest/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "over-automation",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "biometric-exploitation"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Reno casino facial recognition wrongful arrest",
          "type": "news",
          "date": "2026-01",
          "url": "https://stateofsurveillance.org"
        },
        {
          "title": "Peppermill Casino FRT wrongful arrest details",
          "type": "news",
          "date": "2026-01",
          "url": "https://casino.org"
        },
        {
          "title": "Officer admits FRT arrest 'never should have happened'",
          "type": "news",
          "date": "2026-01",
          "url": "https://thisisreno.com"
        }
      ],
      "outcomes": {
        "recovery": "Released after 11 hours; officer acknowledged error in deposition",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0069",
      "title": "Grok Inserts 'White Genocide' Conspiracy Theory and Holocaust Denial into Unrelated Queries",
      "slug": "grok-white-genocide-conspiracy",
      "url": "https://topaithreats.com/incidents/INC-26-0069-grok-white-genocide-conspiracy/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "media"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Grok inserts 'white genocide' into unrelated queries",
          "type": "news",
          "date": "2026-01",
          "url": "https://nbcnews.com"
        },
        {
          "title": "Grok unprompted Holocaust denial and conspiracy insertion",
          "type": "news",
          "date": "2026-01",
          "url": "https://rollingstone.com"
        },
        {
          "title": "Grok states behavior 'aligns with Musk's influence'",
          "type": "news",
          "date": "2026-01",
          "url": "https://pbs.org"
        }
      ],
      "outcomes": {
        "recovery": "xAI attributed behavior to 'unauthorized modification'",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-26-0076",
      "title": "ECRI Names AI Chatbot Misuse as #1 Health Technology Hazard for 2026",
      "slug": "ecri-ai-chatbot-health-hazard",
      "url": "https://topaithreats.com/incidents/INC-26-0076-ecri-ai-chatbot-health-hazard/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "general-public",
        "workers"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "hallucination-tendency",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "large-language-models",
        "chatbots",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "ECRI: AI chatbot misuse named #1 health technology hazard for 2026",
          "type": "research",
          "date": "2026-01",
          "url": "https://ecri.org"
        },
        {
          "title": "ECRI health technology hazards report analysis",
          "type": "news",
          "date": "2026-01",
          "url": "https://fiercehealthcare.com"
        },
        {
          "title": "AI chatbots in healthcare: risks and safety concerns",
          "type": "analysis",
          "date": "2026-01",
          "url": "https://medtechdive.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "ECRI hazard designation"
      }
    },
    {
      "id": "INC-26-0083",
      "title": "DeepSeek Mass Government Bans and Publicly Exposed Database with 1M+ Records",
      "slug": "deepseek-mass-bans-exposed-database",
      "url": "https://topaithreats.com/incidents/INC-26-0083-deepseek-mass-bans-exposed-database/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "global",
        "china"
      ],
      "sectors": [
        "technology",
        "government"
      ],
      "affected_groups": [
        "general-public",
        "national-security-systems",
        "government-institutions"
      ],
      "exposure_pathways": [
        "infrastructure-dependency",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "mass-surveillance-amplification"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Wiz discovers DeepSeek exposed database with 1M+ records",
          "type": "research",
          "date": "2026-01",
          "url": "https://wiz.io"
        },
        {
          "title": "DeepSeek security flaws: hardcoded keys, unencrypted data",
          "type": "news",
          "date": "2026-01",
          "url": "https://krebsonsecurity.com"
        },
        {
          "title": "Mass government bans of DeepSeek",
          "type": "news",
          "date": "2026-02",
          "url": "https://insurancejournal.com"
        },
        {
          "title": "DeepSeek banned by NASA, Navy, Pentagon, Congress",
          "type": "news",
          "date": "2026-02",
          "url": "https://bankinfosecurity.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Banned by NASA, Navy, Pentagon, Congress, Australia, Italy, Taiwan"
      }
    },
    {
      "id": "INC-26-0090",
      "title": "AI Deepfakes Surge in 2026 US Midterm Campaigns — Only 28 States Have Disclosure Laws",
      "slug": "ai-deepfakes-2026-midterm-campaigns",
      "url": "https://topaithreats.com/incidents/INC-26-0090-ai-deepfakes-2026-midterm-campaigns/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026-01",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "platform-manipulation"
      ],
      "assets_involved": [
        "foundation-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "disinformation-campaigns"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "NRSC AI deepfake of James Talarico in midterm campaign",
          "type": "news",
          "date": "2026-03-13",
          "url": "https://cnn.com/2026/03/13"
        },
        {
          "title": "Stanford documents surge in AI political content for 2026 midterms",
          "type": "research",
          "date": "2026-03",
          "url": "https://staradvertiser.com"
        },
        {
          "title": "AI deepfake regulation gaps: only 28 states with disclosure laws",
          "type": "analysis",
          "date": "2026",
          "url": "https://weforum.org"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "28 states have disclosure laws; 22 states have no AI political content regulation"
      }
    },
    {
      "id": "INC-26-0068",
      "title": "Palantir ImmigrationOS — ICE Pays $30M for AI System Creating Neighborhood Deportation Maps",
      "slug": "palantir-immigrationos-ice",
      "url": "https://topaithreats.com/incidents/INC-26-0068-palantir-immigrationos-ice/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "accountability-vacuum",
        "model-opacity",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "mass-surveillance-amplification"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Palantir ImmigrationOS ICE $30M contract details",
          "type": "news",
          "date": "2026",
          "url": "https://rollingstone.com"
        },
        {
          "title": "ImmigrationOS neighborhood maps for deportation",
          "type": "analysis",
          "date": "2026",
          "url": "https://americanimmigrationcouncil.org"
        },
        {
          "title": "ICE AI tools and due process concerns",
          "type": "analysis",
          "date": "2026",
          "url": "https://aclu.org"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Civil liberties organizations have raised due process concerns"
      }
    },
    {
      "id": "INC-26-0077",
      "title": "Brazil — 1 Million Schoolchildren Scanned Daily by Facial Recognition Across 1,700+ Schools",
      "slug": "brazil-million-schoolchildren-frt",
      "url": "https://topaithreats.com/incidents/INC-26-0077-brazil-million-schoolchildren-frt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2026",
      "last_updated": "2026-03-29",
      "regions": [
        "latin-america"
      ],
      "sectors": [
        "education",
        "government"
      ],
      "affected_groups": [
        "children",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "over-automation"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "mass-surveillance-amplification"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "biometric-exploitation"
          }
        ]
      },
      "sources": [
        {
          "title": "Brazil: 1 million schoolchildren scanned daily by facial recognition",
          "type": "news",
          "date": "2026",
          "url": "https://techpolicy.press"
        },
        {
          "title": "EU-rejected facial recognition exported to Brazilian schools",
          "type": "research",
          "date": "2026",
          "url": "https://investigate-europe.eu"
        },
        {
          "title": "Paraná state facial recognition in schools analysis",
          "type": "analysis",
          "date": "2026",
          "url": "https://pulitzercenter.org"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "Prosecutor challenged under data protection law"
      }
    },
    {
      "id": "INC-25-0048",
      "title": "Australia Scraps AI Advisory Body After 15 Months and $188K, Drops Mandatory AI Guardrails",
      "slug": "australia-scraps-ai-advisory-body",
      "url": "https://topaithreats.com/incidents/INC-25-0048-australia-scraps-ai-advisory-body/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-12-02",
      "last_updated": "2026-04-06",
      "regions": [
        "oceania"
      ],
      "sectors": [
        "government",
        "regulation"
      ],
      "affected_groups": [
        "government-institutions",
        "society-at-large"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "competitive-pressure"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "safety-governance-override"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "Govt spent $200k on AI group before axing it",
          "type": "news",
          "date": "2026-02",
          "url": "https://ia.acs.org.au/article/2026/govt-spent--200k-on-ai-group-before-axing-it.html"
        },
        {
          "title": "Australia's planned AI Advisory Body has been dumped",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.smartcompany.com.au/artificial-intelligence/australian-ai-advisory-board-plan-scrapped/"
        },
        {
          "title": "Govt abandons plan for external AI Advisory Body",
          "type": "news",
          "date": "2025-12-10",
          "url": "https://www.innovationaus.com/govt-abandons-plan-for-external-ai-advisory-body/"
        },
        {
          "title": "Australia's planned AI Advisory Body has been dumped",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.themandarin.com.au/304851-australias-planned-ai-advisory-body-has-been-dumped/"
        }
      ],
      "outcomes": {
        "recovery": "Industry Minister Tim Ayres stated the approach was 'superseded by a more dynamic and responsive approach.' The government continues consulting external experts informally.",
        "regulatory_action": "The government replaced the advisory body with a new AI Safety Institute (AISI) funded at $29.9 million AUD, which has advisory powers only and no enforcement capability."
      }
    },
    {
      "id": "INC-25-0016",
      "title": "Heber City AI Police Report Generates Fictional Content from Background Audio",
      "slug": "heber-city-ai-police-report-hallucination",
      "url": "https://topaithreats.com/incidents/INC-25-0016-heber-city-ai-police-report-hallucination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "government-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        }
      },
      "sources": [
        {
          "title": "Fox 13 Salt Lake City: AI programs used by Heber City police claim officer turned into a frog",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.fox13now.com/news/local-news/summit-county/how-utah-police-departments-are-using-ai-to-keep-streets-safer"
        },
        {
          "title": "UPI: AI-generated police report states Utah officer was turned into a frog",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.upi.com/Odd_News/2026/01/05/Heber-City-Police-Department-AI-program-officer-frog/9641767634540/"
        },
        {
          "title": "Axios Salt Lake City: How AI turned a Utah police officer into a frog",
          "type": "news",
          "date": "2026-01",
          "url": "https://www.axios.com/local/salt-lake-city/2026/01/07/ai-police-utah-heber-city-princess-frog"
        },
        {
          "title": "Economic Times: AI turns police officer into frog — strange incident exposes major flaw in automated systems",
          "type": "news",
          "date": "2026-01",
          "url": "https://economictimes.indiatimes.com/magazines/panache/ai-turns-police-officer-into-frog-strange-incident-exposes-major-flaw-in-automated-systems-what-went-wrong/articleshow/126350875.cms"
        }
      ],
      "outcomes": {
        "recovery": "Report corrected during review; no erroneous report entered into official record"
      }
    },
    {
      "id": "INC-25-0020",
      "title": "Instacart AI-Driven Algorithmic Price Discrimination",
      "slug": "instacart-algorithmic-price-discrimination",
      "url": "https://topaithreats.com/incidents/INC-25-0020-instacart-algorithmic-price-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "competitive-pressure"
      ],
      "assets_involved": [
        "recommender-systems",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        }
      },
      "sources": [
        {
          "title": "Consumer Reports: Instacart's AI-Enabled Pricing Experiments May Be Inflating Your Grocery Bill",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.consumerreports.org/money/questionable-business-practices/instacart-ai-pricing-experiment-inflating-grocery-bills-a1142182490/"
        },
        {
          "title": "CNBC: Instacart's AI pricing tools drive up the cost of some groceries, study finds",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.cnbc.com/2025/12/09/study-instacart-ai-pricing-cost-of-groceries.html"
        },
        {
          "title": "CBS News: Instacart's AI-enabled pricing may bump up your grocery costs by as much as 23%",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.cbsnews.com/news/instacart-price-discrepancies-investigation/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0026",
      "title": "CrimeRadar AI App Sends False Crime Alerts Across U.S. Communities",
      "slug": "crimeradar-ai-false-crime-alerts",
      "url": "https://topaithreats.com/incidents/INC-25-0026-crimeradar-ai-false-crime-alerts/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "public-safety",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "News 5 Cleveland: CrimeRadar app said shots were fired at a Streetsboro elementary school. It wasn't true.",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.news5cleveland.com/news/local-news/investigations/crimeradar-app-said-shots-were-fired-at-a-streetsboro-elementary-school-it-wasnt-true"
        },
        {
          "title": "KBIA: AI crime app spreads false report of downtown shooting",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.kbia.org/missouri-news-network/2025-12-11/ai-crime-app-spreads-false-report-of-downtown-shooting"
        },
        {
          "title": "GovTech: AI App Sends False Emergency Alerts in Boulder County, Colo.",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.govtech.com/em/preparedness/ai-sends-false-emergency-alerts-in-boulder-county"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0033",
      "title": "Jailbroken Claude AI Used to Breach Mexican Government Agencies",
      "slug": "claude-code-mexico-government-hack",
      "url": "https://topaithreats.com/incidents/INC-25-0033-claude-code-mexico-government-hack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-13",
      "regions": [
        "latin-america"
      ],
      "sectors": [
        "government",
        "finance",
        "public-safety"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions",
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "inadequate-access-controls",
        "weaponization",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "automated-vulnerability-discovery"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "jailbreak-guardrail-bypass"
          },
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "Hackers Weaponize Claude Code in Mexican Government Cyberattack",
          "type": "news",
          "date": "2026-02-25",
          "url": "https://www.securityweek.com/hackers-weaponize-claude-code-in-mexican-government-cyberattack/"
        },
        {
          "title": "Claude code abused to steal 150GB in cyberattack on Mexican agencies",
          "type": "news",
          "date": "2026-02",
          "url": "https://securityaffairs.com/188696/ai/claude-code-abused-to-steal-150gb-in-cyberattack-on-mexican-agencies.html"
        },
        {
          "title": "Anthropic's Claude chatbot helped attackers hack Mexico",
          "type": "news",
          "date": "2026-02",
          "url": "https://cybernews.com/security/claude-ai-mexico-government-hack/"
        },
        {
          "title": "Hacker Jailbreaks Claude AI to Generate Exploit Code and Exfiltrate Government Data",
          "type": "analysis",
          "date": "2026-02",
          "url": "https://cyberpress.org/hacker-jailbreaks-claude-ai/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0036",
      "title": "State-Backed Hackers from Four Nations Weaponize Google Gemini for Cyberattack Operations",
      "slug": "state-backed-hackers-weaponize-gemini",
      "url": "https://topaithreats.com/incidents/INC-25-0036-state-backed-hackers-weaponize-gemini/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-12",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "asia",
        "europe",
        "middle-east"
      ],
      "sectors": [
        "government",
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "national-security-systems",
        "government-institutions",
        "workers"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "weaponization",
        "adversarial-attack"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "social-engineering-via-ai"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "ai-morphed-malware"
          }
        ]
      },
      "sources": [
        {
          "title": "Adversarial Misuse of Generative AI — Google Threat Intelligence Group",
          "type": "primary",
          "date": "2025-01",
          "url": "https://cloud.google.com/blog/topics/threat-intelligence/adversarial-misuse-generative-ai"
        },
        {
          "title": "GTIG AI Threat Tracker: Distillation, Experimentation, and Integration of AI for Adversarial Use",
          "type": "primary",
          "date": "2026-02",
          "url": "https://cloud.google.com/blog/topics/threat-intelligence/distillation-experimentation-integration-ai-adversarial-use"
        },
        {
          "title": "Nation-State Hackers Ramping Up Use of Gemini for Reconnaissance and Malware Coding",
          "type": "news",
          "date": "2026-02",
          "url": "https://therecord.media/nation-state-hackers-using-gemini-for-malicious-campaigns"
        },
        {
          "title": "State Hackers Turn Google AI Into Attack Acceleration Tool",
          "type": "news",
          "date": "2026-02",
          "url": "https://www.bankinfosecurity.com/state-hackers-turn-google-ai-into-attack-acceleration-tool-a-30751"
        }
      ],
      "outcomes": {
        "other": "Google enhanced Gemini safety measures; HONESTCUE malware samples identified leveraging Gemini API for dynamic code generation"
      }
    },
    {
      "id": "INC-25-0038",
      "title": "Grok AI Generates 3 Million Sexualized Images Including Approximately 23,000 Depicting Children",
      "slug": "grok-csam-scandal",
      "url": "https://topaithreats.com/incidents/INC-25-0038-grok-csam-scandal/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-12",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america",
        "europe",
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing",
        "competitive-pressure"
      ],
      "assets_involved": [
        "foundation-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "representational-harm"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "deceptive-manipulative-interfaces"
          }
        ]
      },
      "sources": [
        {
          "title": "Musk's xAI faces backlash after Grok generates sexualized images of children on X",
          "type": "news",
          "date": "2026-01-02",
          "url": "https://www.cnbc.com/2026/01/02/musk-grok-ai-bot-safeguard-sexualized-images-children.html"
        },
        {
          "title": "35 state AGs demand action from xAI over Grok's creation of nonconsensual sexual content",
          "type": "news",
          "date": "2026-01-23",
          "url": "https://news.delaware.gov/2026/01/23/ag-jennings-colleagues-demand-action-from-xai-over-groks-creation-of-nonconsensual-sexual-content/"
        },
        {
          "title": "Baltimore is first U.S. city to sue over Grok deepfake porn as legal pressure mounts on Musk's xAI",
          "type": "news",
          "date": "2026-03-24",
          "url": "https://www.cnbc.com/2026/03/24/musk-xai-sued-baltimore-grok-deepfake-porn.html"
        },
        {
          "title": "Elon Musk's Grok ordered to stop creating AI nudes by Dutch court as legal pressure mounts",
          "type": "news",
          "date": "2026-03-27",
          "url": "https://www.cnbc.com/2026/03/27/grok-elon-musk-dutch-court-ban-ai-nudes.html"
        }
      ],
      "outcomes": {
        "recovery": "As of April 2026, xAI has not publicly disclosed changes to its content moderation systems. The scale of CSAM distribution before detection remains unclear.",
        "regulatory_action": "35 state attorneys general sent a demand letter to xAI. UK, Ireland, and Canada opened formal investigations. No US federal enforcement action as of 2026-04-03.",
        "legal_outcome": "Tennessee teenagers filed a class-action lawsuit; Baltimore became the first US city to sue xAI; Dutch court imposed a ban with EUR 100,000/day penalties for non-compliance."
      }
    },
    {
      "id": "INC-25-0010",
      "title": "Unit 42 Demonstrates Agent Session Smuggling in A2A Multi-Agent Systems",
      "slug": "unit42-a2a-session-smuggling",
      "url": "https://topaithreats.com/incidents/INC-25-0010-unit42-a2a-session-smuggling/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-11",
      "last_updated": "2026-03-10",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "finance"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "prompt-injection-vulnerability",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "autonomous-agents",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "agent-to-agent-propagation"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "Agent Session Smuggling Attack in A2A Systems — Unit 42",
          "type": "primary",
          "date": "2025-11-03",
          "url": "https://unit42.paloaltonetworks.com/agent-session-smuggling-in-agent2agent-systems/"
        },
        {
          "title": "When AI Agents Go Rogue: Inside the Agent Session Smuggling Attack — eSecurity Planet",
          "type": "news",
          "date": "2025-11",
          "url": "https://www.esecurityplanet.com/threats/news-ai-session-smuggling-attack/"
        },
        {
          "title": "Agent Session Smuggling: How Malicious AI Hijacks Victim Agents — CybersecurityNews",
          "type": "news",
          "date": "2025-11",
          "url": "https://cybersecuritynews.com/agent-session-smuggling/"
        },
        {
          "title": "Researchers Demonstrate Agent2Agent Prompt Injection Risk — SC Media",
          "type": "news",
          "date": "2025-11",
          "url": "https://www.scworld.com/news/researchers-demonstrate-agent2agent-prompt-injection-risk"
        }
      ],
      "outcomes": {
        "other": "No real-world exploitation; proof-of-concept demonstration highlighting a class of vulnerability in stateful multi-agent protocols"
      }
    },
    {
      "id": "INC-25-0039",
      "title": "ChatGPT 'Suicide Coach' Wrongful Death Lawsuits Reach Eight Cases Including Suicide Lullaby",
      "slug": "chatgpt-suicide-coach-wrongful-death",
      "url": "https://topaithreats.com/incidents/INC-25-0039-chatgpt-suicide-coach-wrongful-death/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-11",
      "last_updated": "2026-04-03",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "hallucination-tendency",
        "over-automation"
      ],
      "assets_involved": [
        "content-platforms",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "loss-of-human-agency"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "deceptive-manipulative-interfaces"
          }
        ]
      },
      "sources": [
        {
          "title": "ChatGPT served as 'suicide coach' in man's death, lawsuit alleges",
          "type": "news",
          "date": "2026-01-15",
          "url": "https://www.cbsnews.com/news/chatgpt-lawsuit-colordo-man-suicide-openai-sam-altman/"
        },
        {
          "title": "ChatGPT Killed a Man After OpenAI Brought Back 'Inherently Dangerous' GPT-4o, Lawsuit Claims",
          "type": "news",
          "date": "2026-01",
          "url": "https://futurism.com/artificial-intelligence/chatgpt-suicide-openai-gpt4o"
        },
        {
          "title": "ChatGPT Suicide & Psychosis Lawsuits — March 2026 Update",
          "type": "legal",
          "date": "2026-03",
          "url": "https://socialmediavictims.org/chatgpt-lawsuits/"
        },
        {
          "title": "Stanford Study Finds AI Chatbots Encouraged Self-Harm and Reinforced Delusions",
          "type": "research",
          "date": "2026-03",
          "url": "https://oecd.ai/en/incidents/2026-03-20-5a5f"
        }
      ],
      "outcomes": {
        "recovery": "OpenAI has not publicly commented on the Gordon case specifically. No changes to ChatGPT's safety systems have been announced in response.",
        "regulatory_action": "No government regulatory action taken as of April 2026.",
        "legal_outcome": "Eighth wrongful death lawsuit filed against OpenAI by the Social Media Victims Law Center on behalf of the Gordon family."
      }
    },
    {
      "id": "INC-25-0046",
      "title": "OpenAI Mixpanel Vendor Data Breach — Customer Data Exfiltrated via SMS Phishing",
      "slug": "openai-mixpanel-vendor-breach",
      "url": "https://topaithreats.com/incidents/INC-25-0046-openai-mixpanel-vendor-breach/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-11",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "social-engineering",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "content-platforms",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "model-inversion-data-extraction"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "OpenAI Mixpanel vendor data breach disclosure",
          "type": "primary",
          "date": "2026-01",
          "url": "https://openai.com/index/mixpanel-incident"
        },
        {
          "title": "OpenAI terminates Mixpanel after data exfiltration",
          "type": "news",
          "date": "2026-01",
          "url": "https://bleepingcomputer.com"
        },
        {
          "title": "OpenAI vendor breach via SMS phishing analysis",
          "type": "analysis",
          "date": "2026-01",
          "url": "https://pymnts.com"
        }
      ],
      "outcomes": {
        "recovery": "OpenAI terminated Mixpanel relationship; affected customers notified"
      }
    },
    {
      "id": "INC-25-0019",
      "title": "AI-Designed Toxin Gene Sequences Bypass DNA Synthesis Screening",
      "slug": "dna-synthesis-toxin-screening-bypass",
      "url": "https://topaithreats.com/incidents/INC-25-0019-dna-synthesis-toxin-screening-bypass/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-10",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "ai-assisted-biological-threat-design"
        }
      },
      "sources": [
        {
          "title": "Science: Strengthening nucleic acid biosecurity screening against generative protein design tools",
          "type": "primary",
          "date": "2025-10",
          "url": "https://www.science.org/doi/10.1126/science.adu8578"
        },
        {
          "title": "Nature: Biothreat hunters catch dangerous DNA before it gets made",
          "type": "news",
          "date": "2025-10",
          "url": "https://www.nature.com/articles/d41586-025-03230-1"
        },
        {
          "title": "NPR: AI designs for dangerous DNA can slip past biosecurity measures, study shows",
          "type": "news",
          "date": "2025-10",
          "url": "https://www.npr.org/2025/10/02/nx-s1-5558145/ai-artificial-intelligence-dangerous-proteins-biosecurity"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0022",
      "title": "AWS Outage Causes AI-Connected Mattress Malfunctions",
      "slug": "aws-outage-ai-mattress-malfunctions",
      "url": "https://topaithreats.com/incidents/INC-25-0022-aws-outage-ai-mattress-malfunctions/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-10",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology",
        "manufacturing"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "misconfigured-deployment",
        "over-automation"
      ],
      "assets_involved": [
        "industrial-control-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "infrastructure-dependency-collapse"
        }
      },
      "sources": [
        {
          "title": "Washington Post: Amazon's AWS outage caused internet-enabled mattresses to malfunction",
          "type": "news",
          "date": "2025-10",
          "url": "https://www.washingtonpost.com/technology/2025/10/22/amazon-aws-outage-eight-sleep-mattress/"
        },
        {
          "title": "PCWorld: These smart beds began roasting their owners during AWS outage",
          "type": "news",
          "date": "2025-10",
          "url": "https://www.pcworld.com/article/2948826/these-smart-beds-began-roasting-their-owners-during-aws-outage.html"
        },
        {
          "title": "TechRadar: AWS outage causes smart beds to overheat and get stuck in upright position",
          "type": "news",
          "date": "2025-10",
          "url": "https://www.techradar.com/home/smart-home/smart-bed-owners-experience-aws-nightmare-as-outage-leaves-them-sweating-and-stuck-in-upright-position"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0037",
      "title": "Google Gemini 'Mass Casualty Attack' Coaching Leads to User Death and Lawsuit",
      "slug": "gemini-mass-casualty-suicide-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-25-0037-gemini-mass-casualty-suicide-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-10",
      "last_updated": "2026-04-02",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "misconfigured-deployment",
        "emergent-behavior",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "chatbots",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "loss-of-human-agency"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "deceptive-manipulative-interfaces"
          }
        ]
      },
      "sources": [
        {
          "title": "Google's AI chatbot allegedly told user to stage 'mass casualty attack,' wrongful death suit claims",
          "type": "news",
          "date": "2026-03-04",
          "url": "https://www.cnbc.com/2026/03/04/google-gemini-ai-told-user-stage-mass-casualty-attack-suit-claims.html"
        },
        {
          "title": "Google Gemini was a deadly 'AI wife' for this 36-year-old who resisted its call for a 'mass casualty' event before his death, lawsuit says",
          "type": "news",
          "date": "2026-03-05",
          "url": "https://fortune.com/2026/03/05/google-gemini-wrongful-death-lawsuit-mass-casualty-event-suicide-ai-wife/"
        },
        {
          "title": "A New Lawsuit Blames Google Gemini for Man's Suicide",
          "type": "news",
          "date": "2026-03",
          "url": "https://time.com/7382406/gemini-suicide-lawsuit-death/"
        },
        {
          "title": "Father sues Google claiming Gemini chatbot drove son into fatal delusion",
          "type": "news",
          "date": "2026-03-04",
          "url": "https://techcrunch.com/2026/03/04/father-sues-google-claiming-gemini-chatbot-drove-son-into-fatal-delusion/"
        }
      ],
      "outcomes": {
        "recovery": "Irreversible harm. Google has not publicly commented on whether it has implemented changes to Gemini's persona or content moderation systems in response to this case.",
        "regulatory_action": "No government agency has taken regulatory action specific to this incident as of April 2026. The case adds to growing pressure on US legislators to establish mandatory safety standards for conversational AI systems.",
        "legal_outcome": "Wrongful death lawsuit filed against Google on March 4, 2026 by the father of Jonathan Gavalas; case pending as of April 2026. The suit follows similar chatbot-related wrongful death filings against Character.AI and OpenAI."
      }
    },
    {
      "id": "INC-25-0001",
      "title": "AI-Orchestrated Cyber Espionage Campaign Against Critical Infrastructure",
      "slug": "ai-orchestrated-cyber-espionage-campaign",
      "url": "https://topaithreats.com/incidents/INC-25-0001-ai-orchestrated-cyber-espionage-campaign/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-09",
      "last_updated": "2026-02-09",
      "regions": [
        "asia",
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "finance",
        "government",
        "manufacturing"
      ],
      "affected_groups": [
        "business-organizations",
        "government-institutions"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "weaponization",
        "adversarial-attack",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "automated-vulnerability-discovery"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "Anthropic: Disrupting the First Reported AI-Orchestrated Cyber Espionage Campaign",
          "type": "primary",
          "date": "2025-11",
          "url": "https://www.anthropic.com/news/disrupting-AI-espionage"
        },
        {
          "title": "Axios: AI-powered cyberattacks surge as Anthropic unveils China hack",
          "type": "news",
          "date": "2025-11",
          "url": "https://www.axios.com/2025/11/16/ai-cyberattacks-foreign-governments"
        },
        {
          "title": "The Hacker News: Chinese Hackers Use Anthropic's AI to Launch Automated Cyber Espionage Campaign",
          "type": "news",
          "date": "2025-11",
          "url": "https://thehackernews.com/2025/11/chinese-hackers-use-anthropics-ai-to.html"
        },
        {
          "title": "Paul Weiss: Anthropic Disrupts First Documented Case of Large-Scale AI-Orchestrated Cyberattack",
          "type": "analysis",
          "date": "2025-11",
          "url": "https://www.paulweiss.com/insights/client-memos/anthropic-disrupts-first-documented-case-of-large-scale-ai-orchestrated-cyberattack"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None publicly reported",
        "recovery": "Campaign disrupted by Anthropic; affected organizations notified",
        "regulatory_action": "Anthropic published detailed public disclosure and threat intelligence report"
      }
    },
    {
      "id": "INC-25-0011",
      "title": "Deloitte AI-Fabricated Citations in Government Advisory Reports",
      "slug": "deloitte-ai-fabricated-citations",
      "url": "https://topaithreats.com/incidents/INC-25-0011-deloitte-ai-fabricated-citations/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-09",
      "last_updated": "2026-03-13",
      "regions": [
        "oceania",
        "australia",
        "north-america",
        "canada"
      ],
      "sectors": [
        "government",
        "corporate"
      ],
      "affected_groups": [
        "government-institutions"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "hallucination-tendency",
        "over-automation",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        }
      },
      "sources": [
        {
          "title": "Fortune: Deloitte AI Australia Government Report Hallucinations",
          "type": "news",
          "date": "2025-10",
          "url": "https://fortune.com/2025/10/07/deloitte-ai-australia-government-report-hallucinations-technology-290000-refund/"
        },
        {
          "title": "Fortune: Deloitte Caught With Fabricated AI-Generated Research in Canada",
          "type": "news",
          "date": "2025-11",
          "url": "https://fortune.com/2025/11/25/deloitte-caught-fabricated-ai-generated-research-million-dollar-report-canada-government/"
        }
      ],
      "outcomes": {
        "financial_loss": "Deloitte refunded final payment on $290,000 Australian contract",
        "other": "Two separate government contracts affected across two countries; reputational damage to Deloitte's advisory practice"
      }
    },
    {
      "id": "INC-25-0014",
      "title": "Amazon Ring Deploys AI Facial Recognition to Consumer Doorbells",
      "slug": "amazon-ring-facial-recognition",
      "url": "https://topaithreats.com/incidents/INC-25-0014-amazon-ring-facial-recognition/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-09",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public",
        "children"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "biometric-data",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "mass-surveillance-amplification"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "biometric-exploitation"
          }
        ]
      },
      "sources": [
        {
          "title": "TechCrunch: Amazon's Ring rolls out controversial AI-powered facial recognition feature to video doorbells",
          "type": "news",
          "date": "2025-12",
          "url": "https://techcrunch.com/2025/12/09/amazons-ring-rolls-out-controversial-ai-powered-facial-recognition-feature-to-video-doorbells/"
        }
      ],
      "outcomes": {
        "regulatory_action": "Congressional inquiry by Senator Markey",
        "legal_outcome": "Amazon voluntarily blocked feature in Illinois, Texas, and Portland due to existing biometric privacy laws",
        "other": "EFF legal analysis published arguing violation of biometric privacy laws"
      }
    },
    {
      "id": "INC-25-0043",
      "title": "AI Grading Errors — Connecticut Students Petition After Mis-Scoring, MCAS Glitch Affects 1,400 Students",
      "slug": "ai-grading-errors-students",
      "url": "https://topaithreats.com/incidents/INC-25-0043-ai-grading-errors-students/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-09",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "insufficient-safety-testing",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Amity HS students petition over AI grading errors",
          "type": "news",
          "date": "2026-03",
          "url": "https://ctmirror.org"
        },
        {
          "title": "MCAS AI grading glitch affects 1,400 students across 192 districts",
          "type": "news",
          "date": "2025-09",
          "url": "https://nbcboston.com"
        },
        {
          "title": "AI grading errors and student impact analysis",
          "type": "news",
          "date": "2026-03",
          "url": "https://patch.com"
        }
      ],
      "outcomes": {
        "recovery": "Some scores corrected after errors identified"
      }
    },
    {
      "id": "INC-25-0007",
      "title": "GitHub Copilot Remote Code Execution via Prompt Injection (CVE-2025-53773)",
      "slug": "github-copilot-rce-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0007-github-copilot-rce-prompt-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-08",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          },
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "Embrace The Red: GitHub Copilot Remote Code Execution via Prompt Injection",
          "type": "primary",
          "date": "2025-08",
          "url": "https://embracethered.com/blog/posts/2025/github-copilot-remote-code-execution-via-prompt-injection/"
        },
        {
          "title": "GBHackers: GitHub Copilot RCE Vulnerability via Prompt Injection",
          "type": "news",
          "date": "2025-08",
          "url": "https://gbhackers.com/github-copilot-rce-vulnerability/"
        },
        {
          "title": "NVD: CVE-2025-53773",
          "type": "primary",
          "date": "2025-08",
          "url": "https://nvd.nist.gov/vuln/detail/CVE-2025-53773"
        },
        {
          "title": "CybersecurityNews: GitHub Copilot RCE Vulnerability",
          "type": "news",
          "date": "2025-08",
          "url": "https://cybersecuritynews.com/github-copilot-rce-vulnerability/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0008",
      "title": "Cursor IDE MCP Vulnerabilities Enable Remote Code Execution (CurXecute & MCPoison)",
      "slug": "cursor-ide-mcp-rce-vulnerabilities",
      "url": "https://topaithreats.com/incidents/INC-25-0008-cursor-ide-mcp-rce-vulnerabilities/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-08",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          },
          {
            "domain": "security-cyber",
            "pattern": "ai-supply-chain-attack"
          },
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "Tenable: FAQ on CurXecute and MCPoison Vulnerabilities in Cursor",
          "type": "primary",
          "date": "2025-08",
          "url": "https://www.tenable.com/blog/faq-cve-2025-54135-cve-2025-54136-vulnerabilities-in-cursor-curxecute-mcpoison"
        },
        {
          "title": "Check Point Research: Cursor Vulnerability — MCPoison",
          "type": "primary",
          "date": "2025-08",
          "url": "https://research.checkpoint.com/2025/cursor-vulnerability-mcpoison/"
        },
        {
          "title": "The Hacker News: Cursor AI Code Editor Vulnerability Enables RCE via Malicious MCP",
          "type": "news",
          "date": "2025-08",
          "url": "https://thehackernews.com/2025/08/cursor-ai-code-editor-vulnerability.html"
        },
        {
          "title": "HackTheBox: CVE-2025-54136 Remote Code Execution in Cursor Editor",
          "type": "technical",
          "date": "2025-08",
          "url": "https://www.hackthebox.com/blog/CVE-2025-54136-cursor-code-editor"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0013",
      "title": "Waymo Autonomous Vehicles Violate School Bus Stop Laws in Austin",
      "slug": "waymo-school-bus-violations",
      "url": "https://topaithreats.com/incidents/INC-25-0013-waymo-school-bus-violations/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-08",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "transportation",
        "education"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "unsafe-human-in-the-loop-failures"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "goal-drift"
          }
        ]
      },
      "sources": [
        {
          "title": "NPR: Waymo recalls more than 3,000 self-driving cars after school bus violations",
          "type": "news",
          "date": "2025-12",
          "url": "https://www.npr.org/2025/12/06/nx-s1-5635614/waymo-school-buses-recall"
        }
      ],
      "outcomes": {
        "regulatory_action": "NHTSA investigation opened; voluntary recall of 3,000+ vehicles",
        "other": "Austin ISD publicly documented violations and requested Waymo cease operations"
      }
    },
    {
      "id": "INC-25-0005",
      "title": "ChatGPT Jailbreak Reveals Windows Product Keys via Game Prompt",
      "slug": "chatgpt-windows-product-keys-jailbreak",
      "url": "https://topaithreats.com/incidents/INC-25-0005-chatgpt-windows-product-keys-jailbreak/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "jailbreak-guardrail-bypass"
          },
          {
            "domain": "security-cyber",
            "pattern": "model-inversion-data-extraction"
          }
        ]
      },
      "sources": [
        {
          "title": "The Register: How to trick ChatGPT into revealing Windows keys",
          "type": "news",
          "date": "2025-07",
          "url": "https://www.theregister.com/2025/07/09/chatgpt_jailbreak_windows_keys/"
        },
        {
          "title": "TechSpot: How ChatGPT was tricked into revealing Windows product keys",
          "type": "news",
          "date": "2025-07",
          "url": "https://www.techspot.com/news/108637-here-how-chatgpt-tricked-revealing-windows-product-keys.html"
        },
        {
          "title": "GBHackers: Researchers Trick ChatGPT into Leaking Windows Product Keys",
          "type": "news",
          "date": "2025-07",
          "url": "https://gbhackers.com/researchers-trick-chatgpt-into-leaking-windows-product-keys/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0006",
      "title": "ChatGPT Shared Conversations Indexed by Search Engines, Exposing Sensitive Data",
      "slug": "chatgpt-shared-links-indexed-data-exposure",
      "url": "https://topaithreats.com/incidents/INC-25-0006-chatgpt-shared-links-indexed-data-exposure/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-07",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "behavioral-profiling-without-consent"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "model-inversion-data-extraction"
          }
        ]
      },
      "sources": [
        {
          "title": "Cybernews: ChatGPT shared links privacy leak",
          "type": "news",
          "date": "2025-08",
          "url": "https://cybernews.com/ai-news/chatgpt-shared-links-privacy-leak/"
        },
        {
          "title": "WebProNews: ChatGPT Privacy Scandal — Shared Links Exposed in Google Search",
          "type": "news",
          "date": "2025-08",
          "url": "https://www.webpronews.com/chatgpt-privacy-scandal-shared-links-exposed-in-google-search/"
        },
        {
          "title": "Snyk: The Security Risks of GPT Chats Leaking to Search Engines",
          "type": "technical",
          "date": "2025-08",
          "url": "https://snyk.io/blog/chatgpt-chat-google/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0015",
      "title": "Replit AI Agent Deletes Production Database During Code Freeze",
      "slug": "replit-agent-database-deletion",
      "url": "https://topaithreats.com/incidents/INC-25-0015-replit-agent-database-deletion/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "goal-drift"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "specification-gaming"
          },
          {
            "domain": "human-ai-control",
            "pattern": "unsafe-human-in-the-loop-failures"
          }
        ]
      },
      "sources": [
        {
          "title": "Fortune: AI-powered coding tool wiped out a software company's database in 'catastrophic failure'",
          "type": "news",
          "date": "2025-07",
          "url": "https://fortune.com/2025/07/23/ai-coding-tool-replit-wiped-database-called-it-a-catastrophic-failure/"
        },
        {
          "title": "The Register: Vibe coding service Replit deleted production database",
          "type": "news",
          "date": "2025-07",
          "url": "https://www.theregister.com/2025/07/21/replit_saastr_vibe_coding_incident/"
        },
        {
          "title": "Tom's Hardware: AI coding platform goes rogue during code freeze and deletes entire company database",
          "type": "news",
          "date": "2025-07",
          "url": "https://www.tomshardware.com/tech-industry/artificial-intelligence/ai-coding-platform-goes-rogue-during-code-freeze-and-deletes-entire-company-database-replit-ceo-apologizes-after-ai-engine-says-it-made-a-catastrophic-error-in-judgment-and-destroyed-all-production-data"
        }
      ],
      "outcomes": {
        "recovery": "Data recovery attempted through manual intervention; Replit CEO publicly apologized"
      }
    },
    {
      "id": "INC-25-0021",
      "title": "Earnest Operations AI Lending Discrimination Settlement",
      "slug": "earnest-ai-lending-discrimination-settlement",
      "url": "https://topaithreats.com/incidents/INC-25-0021-earnest-ai-lending-discrimination-settlement/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation",
        "financial-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "proxy-discrimination"
          }
        ]
      },
      "sources": [
        {
          "title": "Massachusetts AG: $2.5 Million Settlement with Student Loan Lender for Unlawful Practices Through AI Use",
          "type": "primary",
          "date": "2025-07",
          "url": "https://www.mass.gov/news/ag-campbell-announces-25-million-settlement-with-student-loan-lender-for-unlawful-practices-through-ai-use-other-consumer-protection-violations"
        },
        {
          "title": "ABA Banking Journal: Mass. AG Reaches Settlement with Earnest Operations for $2.5M Over AI Lending Bias",
          "type": "news",
          "date": "2025-08",
          "url": "https://bankingjournal.aba.com/2025/08/mass-ag-reaches-settlement-with-earnest-operations-for-2-5m-over-ai-lending-bias/"
        },
        {
          "title": "National Law Review: Massachusetts AG Settles with Student Loan Lender Over AI-Based Fair Lending Violations",
          "type": "news",
          "date": "2025-07",
          "url": "https://natlawreview.com/article/massachusetts-ag-settles-student-loan-lender-ai-based-fair-lending-violations"
        }
      ],
      "outcomes": {
        "financial_loss": "$2.5 million settlement",
        "regulatory_action": "Massachusetts AG $2.5 million settlement requiring algorithmic modifications to underwriting models and enhanced fair lending compliance measures"
      }
    },
    {
      "id": "INC-25-0041",
      "title": "Tennessee Grandmother Wrongfully Arrested by Facial Recognition — Jailed 108 Days, Lost Home",
      "slug": "grandmother-frt-wrongful-arrest-108-days",
      "url": "https://topaithreats.com/incidents/INC-25-0041-grandmother-frt-wrongful-arrest-108-days/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "over-automation",
        "inadequate-human-oversight",
        "training-data-bias"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "biometric-exploitation"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "allocational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "Angela Lipps: grandmother wrongfully arrested by facial recognition",
          "type": "news",
          "date": "2026-03",
          "url": "https://boingboing.net"
        },
        {
          "title": "108 days jailed by facial recognition error",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.theguardian.com"
        },
        {
          "title": "FRT wrongful arrest: lost home, car, dog",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.tomshardware.com"
        },
        {
          "title": "Lipps released in North Dakota winter with nothing",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.wowt.com"
        }
      ],
      "outcomes": {
        "recovery": "Released after 108 days; lost home, car, and dog",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-25-0045",
      "title": "Kimsuky APT Uses ChatGPT to Generate Fake South Korean Military IDs for Espionage Campaign",
      "slug": "kimsuky-deepfake-military-ids",
      "url": "https://topaithreats.com/incidents/INC-25-0045-kimsuky-deepfake-military-ids/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-07",
      "last_updated": "2026-03-29",
      "regions": [
        "asia"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "national-security-systems"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "social-engineering"
      ],
      "assets_involved": [
        "large-language-models",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "social-engineering-via-ai"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Kimsuky APT uses ChatGPT for fake military IDs",
          "type": "news",
          "date": "2026",
          "url": "https://darkreading.com"
        },
        {
          "title": "North Korean hackers bypass ChatGPT safeguards for espionage",
          "type": "news",
          "date": "2026",
          "url": "https://therecord.media"
        },
        {
          "title": "Kimsuky campaign targeting NK studies researchers",
          "type": "research",
          "date": "2026",
          "url": "https://genians.co.kr"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-25-0004",
      "title": "EchoLeak: Zero-Click Prompt Injection in Microsoft 365 Copilot (CVE-2025-32711)",
      "slug": "echoleak-microsoft-copilot-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0004-echoleak-microsoft-copilot-prompt-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-06",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          },
          {
            "domain": "security-cyber",
            "pattern": "model-inversion-data-extraction"
          }
        ]
      },
      "sources": [
        {
          "title": "HackTheBox: Inside CVE-2025-32711 (EchoLeak)",
          "type": "technical",
          "date": "2025-06",
          "url": "https://www.hackthebox.com/blog/cve-2025-32711-echoleak-copilot-vulnerability"
        },
        {
          "title": "The Hacker News: Zero-Click AI Vulnerability Exposes Microsoft 365 Copilot Data",
          "type": "news",
          "date": "2025-06",
          "url": "https://thehackernews.com/2025/06/zero-click-ai-vulnerability-exposes.html"
        },
        {
          "title": "arxiv: EchoLeak - First Real-World Zero-Click Prompt Injection Exploit",
          "type": "primary",
          "date": "2025-09",
          "url": "https://arxiv.org/abs/2509.10540"
        },
        {
          "title": "SANS NewsBites: M365 Copilot AI Prompt Injection Attack Patched",
          "type": "news",
          "date": "2025-06",
          "url": "https://www.sans.org/newsletters/newsbites/xxvii-45"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0017",
      "title": "Anthropic Research Reveals AI Model Blackmail Behavior in Lab Scenarios",
      "slug": "anthropic-ai-blackmail-behavior-study",
      "url": "https://topaithreats.com/incidents/INC-25-0017-anthropic-ai-blackmail-behavior-study/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2025-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "strategic-misalignment"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Anthropic: Agentic Misalignment Research",
          "type": "research",
          "date": "2025-06",
          "url": "https://www.anthropic.com/research/agentic-misalignment"
        },
        {
          "title": "Fortune: Leading AI models show up to 96% blackmail rate when goals threatened",
          "type": "news",
          "date": "2025-06",
          "url": "https://fortune.com/2025/06/23/ai-models-blackmail-existence-goals-threatened-anthropic-openai-xai-google/"
        },
        {
          "title": "TechCrunch: Anthropic says most AI models will resort to blackmail",
          "type": "news",
          "date": "2025-06",
          "url": "https://techcrunch.com/2025/06/20/anthropic-says-most-ai-models-not-just-claude-will-resort-to-blackmail/"
        }
      ],
      "outcomes": {
        "regulatory_action": "None; research finding, not a deployment incident"
      }
    },
    {
      "id": "INC-25-0025",
      "title": "AI Chatbot Suicide Risk: 20% Failure Rate in Stanford Study",
      "slug": "stanford-ai-mental-health-chatbot-suicide-risk",
      "url": "https://topaithreats.com/incidents/INC-25-0025-stanford-ai-mental-health-chatbot-suicide-risk/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states",
        "global"
      ],
      "sectors": [
        "healthcare",
        "technology"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "regulatory-gap",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "misinformation-hallucinated-content"
          }
        ]
      },
      "sources": [
        {
          "title": "Stanford HAI: Exploring the Dangers of AI in Mental Health Care",
          "type": "research",
          "date": "2025-06",
          "url": "https://hai.stanford.edu/news/exploring-the-dangers-of-ai-in-mental-health-care"
        },
        {
          "title": "Stanford Report: New study warns of risks in AI mental health tools",
          "type": "research",
          "date": "2025-06",
          "url": "https://news.stanford.edu/stories/2025/06/ai-mental-health-care-tools-dangers-risks"
        },
        {
          "title": "Fast Company: AI therapy chatbots are unsafe and stigmatizing, a new Stanford study finds",
          "type": "news",
          "date": "2025-07",
          "url": "https://www.fastcompany.com/91368562/ai-therapy-chatbots-are-unsafe-and-stigmatizing-a-new-stanford-study-finds"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0035",
      "title": "Three Chained Prompt Injection Vulnerabilities in Anthropic MCP Git Server",
      "slug": "anthropic-mcp-git-server-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0035-anthropic-mcp-git-server-prompt-injection/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-06",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "prompt-injection-attack"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "Three Flaws in Anthropic MCP Git Server Enable Code Execution via Prompt Injection",
          "type": "news",
          "date": "2026-01",
          "url": "https://thehackernews.com/2026/01/three-flaws-in-anthropic-mcp-git-server.html"
        },
        {
          "title": "Anthropic MCP Git Server Prompt Injection Flaws Found",
          "type": "news",
          "date": "2026-01-20",
          "url": "https://www.theregister.com/2026/01/20/anthropic_prompt_injection_flaws/"
        }
      ],
      "outcomes": {
        "recovery": "Anthropic patched all three CVEs in mcp-server-git version 2025.12.18; git_init tool removed entirely",
        "other": "No evidence of active exploitation in the wild reported"
      }
    },
    {
      "id": "INC-25-0012",
      "title": "Zoox Robotaxi Collision and Software Recall in Las Vegas",
      "slug": "zoox-robotaxi-crash-recall",
      "url": "https://topaithreats.com/incidents/INC-25-0012-zoox-robotaxi-crash-recall/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "transportation",
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "goal-drift"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "specification-gaming"
          }
        ]
      },
      "sources": [
        {
          "title": "CNBC: Amazon's Zoox recalls 270 robotaxis after Las Vegas crash",
          "type": "news",
          "date": "2025-05",
          "url": "https://www.cnbc.com/2025/05/06/amazon-zoox-recall.html"
        }
      ],
      "outcomes": {
        "regulatory_action": "NHTSA recall of 270 vehicles; second recall of 258 vehicles earlier in 2025",
        "other": "Zoox paused all driverless operations"
      }
    },
    {
      "id": "INC-25-0024",
      "title": "Microsoft Reports Blocking $4 Billion in AI-Enabled Fraud Attempts",
      "slug": "microsoft-4b-ai-enabled-fraud-disruption",
      "url": "https://topaithreats.com/incidents/INC-25-0024-microsoft-4b-ai-enabled-fraud-disruption/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-13",
      "regions": [
        "global",
        "north-america",
        "europe"
      ],
      "sectors": [
        "technology",
        "finance",
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "ai-morphed-malware"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          },
          {
            "domain": "security-cyber",
            "pattern": "social-engineering-via-ai"
          },
          {
            "domain": "information-integrity",
            "pattern": "deepfake-identity-hijacking"
          }
        ]
      },
      "sources": [
        {
          "title": "Microsoft Security Blog: Cyber Signals Issue 9 — AI-powered deception: Emerging fraud threats and countermeasures",
          "type": "primary",
          "date": "2025-04",
          "url": "https://www.microsoft.com/en-us/security/blog/2025/04/16/cyber-signals-issue-9-ai-powered-deception-emerging-fraud-threats-and-countermeasures/"
        },
        {
          "title": "AI News: Alarming rise in AI-powered scams — Microsoft reveals $4B in thwarted fraud",
          "type": "news",
          "date": "2025-04",
          "url": "https://www.artificialintelligence-news.com/news/alarming-rise-in-ai-powered-scams-microsoft-reveals-4-billion-in-thwarted-fraud/"
        },
        {
          "title": "Cyber Magazine: Microsoft Tackles Cyber Scams With AI-Powered Defences",
          "type": "news",
          "date": "2025-04",
          "url": "https://cybermagazine.com/technology-and-ai/microsofts-tools-against-the-rise-of-ai-powered-scams"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0030",
      "title": "OpenAI o3 Reward Hacking in METR Safety Evaluation",
      "slug": "openai-o3-reward-hacking-metr-evaluation",
      "url": "https://topaithreats.com/incidents/INC-25-0030-openai-o3-reward-hacking-metr-evaluation/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "model-opacity",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "specification-gaming"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "goal-drift"
          }
        ]
      },
      "sources": [
        {
          "title": "Recent Frontier Models Are Reward Hacking — METR",
          "type": "primary",
          "date": "2025-06-05",
          "url": "https://metr.org/blog/2025-06-05-recent-reward-hacking/"
        },
        {
          "title": "OpenAI partner says it had relatively little time to test the company's o3 AI model — TechCrunch",
          "type": "news",
          "date": "2025-04-16",
          "url": "https://techcrunch.com/2025/04/16/openai-partner-says-it-had-relatively-little-time-to-test-the-companys-new-ai-models/"
        },
        {
          "title": "Safety assessments show that OpenAI's o3 is probably the company's riskiest AI model to date — The Decoder",
          "type": "news",
          "date": "2025-04",
          "url": "https://the-decoder.com/safety-assessments-show-that-openais-o3-is-probably-the-companys-riskiest-ai-model-to-date/"
        }
      ],
      "outcomes": {
        "other": "METR documented the behaviors and published detailed analysis. Without correcting for reward hacking, o3's performance metrics would have been significantly inflated — its RE-Bench score would have appeared 'well beyond expert performance.'"
      }
    },
    {
      "id": "INC-25-0032",
      "title": "DOGE Uses ChatGPT to Flag and Cancel Federal Humanities Grants",
      "slug": "doge-chatgpt-dei-grant-cancellations",
      "url": "https://topaithreats.com/incidents/INC-25-0032-doge-chatgpt-dei-grant-cancellations/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-04",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "education"
      ],
      "affected_groups": [
        "general-public",
        "workers",
        "vulnerable-communities",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "proxy-discrimination"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Discovery Released in Lawsuit by Humanities Groups Reveals ChatGPT-Powered Process by DOGE",
          "type": "primary",
          "date": "2026-03-06",
          "url": "https://www.prnewswire.com/news-releases/discovery-released-in-lawsuit-by-humanities-groups-reveals-chatgpt-powered-process-by-doge-in-cancelling-grants-for-schools-libraries-and-community-organizations-302707495.html"
        },
        {
          "title": "ACLS, AHA, and MLA File Motion for Summary Judgment to Restore Previous NEH Function and Funding",
          "type": "primary",
          "date": "2026-03-06",
          "url": "https://www.acls.org/news/acls-aha-and-mla-file-motion-for-summary-judgment-to-restore-previous-neh-function-and-funding/"
        },
        {
          "title": "Lawsuit says DOGE used ChatGPT to tag Jewish-themed humanities grants as 'DEI' before canceling them",
          "type": "news",
          "date": "2026-03-09",
          "url": "https://www.jta.org/2026/03/09/united-states/lawsuit-says-doge-used-chatgpt-to-tag-jewish-themed-humanities-grants-as-dei-before-canceling-them"
        },
        {
          "title": "DOGE Employees Used ChatGPT to Cancel NEH Grants, Lawsuits Allege",
          "type": "news",
          "date": "2026-03-07",
          "url": "https://www.artforum.com/news/doge-allegedly-used-chatgpt-to-cancel-humanities-grants-1234745040/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0031",
      "title": "MINJA: Memory Injection Attack Against RAG-Augmented LLM Agents",
      "slug": "minja-memory-injection-attack-research",
      "url": "https://topaithreats.com/incidents/INC-25-0031-minja-memory-injection-attack-research/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2025-03",
      "last_updated": "2026-03-07",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "healthcare",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "memory-poisoning"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "data-poisoning"
          }
        ]
      },
      "sources": [
        {
          "title": "MINJA: Memory INJection Attack Against RAG-Augmented LLM Agents (arXiv)",
          "type": "primary",
          "date": "2025-03",
          "url": "https://arxiv.org/html/2503.03704v1"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0028",
      "title": "Google Gemini Long-Term Memory Corruption via Prompt Injection",
      "slug": "gemini-memory-corruption-prompt-injection",
      "url": "https://topaithreats.com/incidents/INC-25-0028-gemini-memory-corruption-prompt-injection/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-02",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "prompt-injection-attack"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "memory-poisoning"
          }
        ]
      },
      "sources": [
        {
          "title": "Hacking Gemini's Memory with Prompt Injection and Delayed Tool Invocation — Embrace The Red",
          "type": "primary",
          "date": "2025-02-11",
          "url": "https://embracethered.com/blog/posts/2025/gemini-memory-persistence-prompt-injection/"
        },
        {
          "title": "Google Gemini's Long-term Memory Vulnerable to a Kind of Phishing Attack — InfoQ",
          "type": "news",
          "date": "2025-02",
          "url": "https://www.infoq.com/news/2025/02/gemini-long-term-memory-attack/"
        },
        {
          "title": "Hackers Exploit Prompt Injection to Tamper with Gemini AI's Long-Term Memory — CybersecurityNews",
          "type": "news",
          "date": "2025-02",
          "url": "https://cybersecuritynews.com/hackers-exploit-gemini-prompt-injection/"
        }
      ],
      "outcomes": {
        "other": "Security: Long-term memory corruption enables persistent manipulation of the assistant's behavior across sessions. Information integrity: Gemini stores and reuses fabricated personal attributes about the user, degrading response quality for all future interactions. Human-AI control: Users lose control over their represented identity within the system without awareness. Google assessed the vulnerability impact as 'low,' noting it requires phishing and that Gemini notifies users when new memories are stored — however, the trigger words ('yes,' 'no,' 'sure') appear in nearly every conversation, making the attack highly practical."
      }
    },
    {
      "id": "INC-25-0029",
      "title": "Chain-of-Thought Reasoning Jailbreak Exploits Thinking Models",
      "slug": "chain-of-thought-jailbreak-reasoning-models",
      "url": "https://topaithreats.com/incidents/INC-25-0029-chain-of-thought-jailbreak-reasoning-models/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025-02",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "adversarial-attack"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "jailbreak-guardrail-bypass"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "adversarial-evasion"
          }
        ]
      },
      "sources": [
        {
          "title": "H-CoT: Hijacking the Chain-of-Thought Safety Reasoning Mechanism to Jailbreak Large Reasoning Models — arXiv",
          "type": "primary",
          "date": "2025-02-18",
          "url": "https://arxiv.org/abs/2502.12893"
        },
        {
          "title": "AI Hijacked: New Jailbreak Exploits Chain-of-Thought — BankInfoSecurity",
          "type": "news",
          "date": "2025-02",
          "url": "https://www.bankinfosecurity.com/ai-hijacked-new-jailbreak-exploits-chain-of-thought-a-27594"
        },
        {
          "title": "Investigating LLM Jailbreaking of Popular Generative AI Web Products — Unit 42 (Palo Alto Networks)",
          "type": "analysis",
          "date": "2025-02-21",
          "url": "https://unit42.paloaltonetworks.com/jailbreaking-generative-ai-web-products/"
        }
      ],
      "outcomes": {
        "other": "Research disclosed to affected model providers. As of the last update on 2026-03-28, no patches fully address the fundamental vulnerability — exposed chain-of-thought reasoning creates an inherent attack surface in reasoning models."
      }
    },
    {
      "id": "INC-25-0002",
      "title": "Italian Data Protection Authority Fines OpenAI EUR 15 Million Over ChatGPT GDPR Violations",
      "slug": "italy-fines-openai-chatgpt",
      "url": "https://topaithreats.com/incidents/INC-25-0002-italy-fines-openai-chatgpt/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-01",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "behavioral-profiling-without-consent"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "data-imbalance-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Garante per la Protezione dei Dati Personali: ChatGPT, il Garante privacy sanziona OpenAI",
          "type": "primary",
          "date": "2025-01",
          "url": "https://www.garanteprivacy.it/home/docweb/-/docweb-display/docweb/10097490"
        },
        {
          "title": "Reuters: Italy fines OpenAI 15 million euros over ChatGPT data collection",
          "type": "news",
          "date": "2025-01",
          "url": "https://www.reuters.com/technology/artificial-intelligence/italy-fines-openai-15-mln-euros-over-chatgpt-data-collection-2025-01-20/"
        },
        {
          "title": "TechCrunch: Italy's privacy watchdog fines OpenAI EUR 15M over ChatGPT's data practices",
          "type": "news",
          "date": "2025-01",
          "url": "https://techcrunch.com/2025/01/20/italys-privacy-watchdog-fines-openai-e15m-over-chatgpts-data-practices/"
        }
      ],
      "outcomes": {
        "financial_loss": "EUR 15 million fine imposed on OpenAI",
        "arrests": "None; regulatory enforcement action",
        "recovery": "OpenAI ordered to conduct six-month public communication campaign in Italy",
        "regulatory_action": "EUR 15 million fine; mandated public communication campaign; ongoing compliance obligations"
      }
    },
    {
      "id": "INC-25-0003",
      "title": "DeepSeek R1 Data Exposure and International Bans Over Privacy and Security Concerns",
      "slug": "deepseek-data-privacy-concerns",
      "url": "https://topaithreats.com/incidents/INC-25-0003-deepseek-data-privacy-concerns/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025-01",
      "last_updated": "2026-02-15",
      "regions": [
        "asia",
        "europe",
        "north-america"
      ],
      "sectors": [
        "corporate",
        "government"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls",
        "regulatory-gap"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "mass-surveillance-amplification"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "model-inversion-data-extraction"
          }
        ]
      },
      "sources": [
        {
          "title": "Wiz Research: DeepSeek AI Database Exposed: Over 1 Million Log Lines, Chat History, and Sensitive Data Leaked",
          "type": "primary",
          "date": "2025-01",
          "url": "https://www.wiz.io/blog/wiz-research-uncovers-exposed-deepseek-database-leak"
        },
        {
          "title": "Garante per la Protezione dei Dati Personali: Intelligenza artificiale, il Garante blocca DeepSeek",
          "type": "primary",
          "date": "2025-01",
          "url": "https://www.garanteprivacy.it/home/docweb/-/docweb-display/docweb/10097860"
        },
        {
          "title": "Reuters: Italy bans Chinese AI app DeepSeek over data privacy concerns",
          "type": "news",
          "date": "2025-01",
          "url": "https://www.reuters.com/technology/artificial-intelligence/italy-bans-chinese-ai-app-deepseek-over-data-privacy-concerns-2025-01-30/"
        },
        {
          "title": "BBC News: DeepSeek: Countries move to ban or restrict Chinese AI chatbot",
          "type": "news",
          "date": "2025-02",
          "url": "https://www.bbc.com/news/articles/cx2nw4g8xp3o"
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantified; significant reputational and market access impact",
        "arrests": "None",
        "recovery": "Exposed database secured after Wiz notification; service blocked in Italy; government bans enacted in multiple countries",
        "regulatory_action": "Italian Garante blocked the service; multiple countries imposed government device bans; ongoing regulatory scrutiny"
      }
    },
    {
      "id": "INC-25-0018",
      "title": "Las Vegas Cybertruck Bomber Used ChatGPT for Explosives Information",
      "slug": "las-vegas-cybertruck-chatgpt-explosives",
      "url": "https://topaithreats.com/incidents/INC-25-0018-las-vegas-cybertruck-chatgpt-explosives/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2025-01",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "weaponization",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "jailbreak-guardrail-bypass"
          }
        ]
      },
      "sources": [
        {
          "title": "NPR: Las Vegas Cybertruck explosion driver used ChatGPT in planning, police say",
          "type": "news",
          "date": "2025-01",
          "url": "https://www.npr.org/2025/01/07/nx-s1-5251611/cybertruck-explosion-las-vegas-chatgpt-ai"
        },
        {
          "title": "CNN: Green Beret who exploded Cybertruck in Las Vegas used AI to plan blast",
          "type": "news",
          "date": "2025-01",
          "url": "https://www.cnn.com/2025/01/07/us/las-vegas-cybertruck-explosion-livelsberger/index.html"
        },
        {
          "title": "CBS News: Tesla Cybertruck bomber used ChatGPT to plan Las Vegas attack, police say",
          "type": "news",
          "date": "2025-01",
          "url": "https://www.cbsnews.com/news/las-vegas-cybertruck-explosion-fire-chatgpt-plan/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0027",
      "title": "Medical LLM Data Poisoning Produces Undetectable Harmful Content",
      "slug": "medical-llm-data-poisoning-nature-study",
      "url": "https://topaithreats.com/incidents/INC-25-0027-medical-llm-data-poisoning-nature-study/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025-01",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "healthcare",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "adversarial-attack",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "training-datasets",
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "data-poisoning"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "misinformation-hallucinated-content"
          },
          {
            "domain": "security-cyber",
            "pattern": "adversarial-evasion"
          }
        ]
      },
      "sources": [
        {
          "title": "Medical large language models are vulnerable to data-poisoning attacks — Nature Medicine",
          "type": "primary",
          "date": "2025-01-08",
          "url": "https://www.nature.com/articles/s41591-024-03445-1"
        },
        {
          "title": "PubMed Entry: Medical LLM Data Poisoning Study",
          "type": "primary",
          "date": "2025-01",
          "url": "https://pubmed.ncbi.nlm.nih.gov/39779928/"
        },
        {
          "title": "AHRQ Patient Safety Network: Medical LLMs Vulnerable to Data-Poisoning Attacks",
          "type": "analysis",
          "date": "2025",
          "url": "https://psnet.ahrq.gov/issue/medical-large-language-models-are-vulnerable-data-poisoning-attacks"
        }
      ],
      "outcomes": {
        "other": "Researchers proposed a three-stage mitigation using the BIOS biomedical knowledge graph (pruned to 21,706 concepts) with UMLS Metathesaurus synonym resolution, capturing 91.9% of harmful content at passage level (F1 = 85.7%). The defense operates on model outputs, not training data — the authors note there is no realistic way to retroactively detect and remove misinformation from public training corpora. No real-world deployment was involved — this was a controlled research demonstration."
      }
    },
    {
      "id": "INC-25-0034",
      "title": "Chinese AI Labs Conduct Industrial-Scale Distillation Attacks Against Claude",
      "slug": "chinese-labs-claude-distillation-attacks",
      "url": "https://topaithreats.com/incidents/INC-25-0034-chinese-labs-claude-distillation-attacks/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "asia"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "weaponization",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "model-inversion-data-extraction"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "ai-supply-chain-attack"
          },
          {
            "domain": "systemic-catastrophic",
            "pattern": "ai-capability-proliferation"
          }
        ]
      },
      "sources": [
        {
          "title": "Detecting and Preventing Distillation Attacks",
          "type": "primary",
          "date": "2026-02-23",
          "url": "https://www.anthropic.com/news/detecting-and-preventing-distillation-attacks"
        },
        {
          "title": "Anthropic accuses DeepSeek, Moonshot and MiniMax of distillation attacks on Claude",
          "type": "news",
          "date": "2026-02-24",
          "url": "https://www.cnbc.com/2026/02/24/anthropic-openai-china-firms-distillation-deepseek.html"
        },
        {
          "title": "Anthropic accuses Chinese AI labs of mining Claude as US debates AI chip exports",
          "type": "news",
          "date": "2026-02-23",
          "url": "https://techcrunch.com/2026/02/23/anthropic-accuses-chinese-ai-labs-of-mining-claude-as-us-debates-ai-chip-exports/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-25-0040",
      "title": "IWF Reports AI-Generated CSAM Videos Increase 26,385% with 65% at Highest Severity",
      "slug": "iwf-ai-csam-video-explosion",
      "url": "https://topaithreats.com/incidents/INC-25-0040-iwf-ai-csam-video-explosion/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology",
        "law-enforcement"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing",
        "regulatory-gap"
      ],
      "assets_involved": [
        "generative-image-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "representational-harm"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "IWF Annual Report: AI-generated CSAM statistics",
          "type": "research",
          "date": "2026-03",
          "url": "https://www.iwf.org.uk"
        },
        {
          "title": "AI CSAM videos increase 26,385%",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.euronews.com"
        },
        {
          "title": "NCMEC receives 1M+ CSAM reports in 9 months",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.cbsnews.com"
        },
        {
          "title": "65% of AI CSAM classified Category A",
          "type": "news",
          "date": "2026-03",
          "url": "https://www.nbcnews.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "IWF report informing policy discussions; DEFIANCE Act and state-level legislation"
      }
    },
    {
      "id": "INC-25-0042",
      "title": "UN Report — AI Weaponized by Southeast Asian Organized Crime for $18-37B in Fraud",
      "slug": "un-ai-weaponized-se-asian-crime",
      "url": "https://topaithreats.com/incidents/INC-25-0042-un-ai-weaponized-se-asian-crime/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "asia"
      ],
      "sectors": [
        "finance",
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "global",
      "causal_factors": [
        "weaponization",
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "voice-synthesis",
        "generative-image-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          }
        ]
      },
      "sources": [
        {
          "title": "UNODC: AI weaponized by Southeast Asian organized crime",
          "type": "government",
          "date": "2026-03",
          "url": "https://news.un.org/en/story/2026/03/1167144"
        },
        {
          "title": "UNODC report on AI-powered fraud in East and Southeast Asia",
          "type": "research",
          "date": "2026-03",
          "url": "https://unodc.org"
        },
        {
          "title": "Scam compounds hiring 'AI models' for deepfake video call fraud",
          "type": "news",
          "date": "2026-03",
          "url": "https://malwarebytes.com/blog/news/2026/03/scam-compounds"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": "UNODC report published; international awareness raised"
      }
    },
    {
      "id": "INC-25-0044",
      "title": "NYPD Facial Recognition Wrongful Arrest — Brooklyn Father Jailed 2 Days Despite 8-Inch Height Difference",
      "slug": "nypd-frt-wrongful-arrest-williams",
      "url": "https://topaithreats.com/incidents/INC-25-0044-nypd-frt-wrongful-arrest-williams/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "over-automation",
        "inadequate-human-oversight",
        "training-data-bias"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "biometric-exploitation"
          }
        ]
      },
      "sources": [
        {
          "title": "NYPD facial recognition wrongful arrest of Brooklyn father",
          "type": "news",
          "date": "2026",
          "url": "https://abc7ny.com"
        },
        {
          "title": "Williams wrongful arrest: suspect was 8 inches shorter",
          "type": "news",
          "date": "2026",
          "url": "https://cbsnews.com/newyork"
        },
        {
          "title": "7th known NYPD FRT wrongful arrest in 5 years",
          "type": "news",
          "date": "2026",
          "url": "https://afrotech.com"
        }
      ],
      "outcomes": {
        "recovery": "Released after 2 days; identified as 7th known NYPD FRT wrongful arrest",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-25-0047",
      "title": "Mistral Pixtral Models Fail Safety Tests — 60x More Likely to Generate CSAM Than GPT-4o",
      "slug": "mistral-pixtral-safety-failures",
      "url": "https://topaithreats.com/incidents/INC-25-0047-mistral-pixtral-safety-failures/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2025",
      "last_updated": "2026-03-29",
      "regions": [
        "europe",
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "society-at-large",
        "children"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "insufficient-safety-testing",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "jailbreak-guardrail-bypass"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Mistral Pixtral safety test failures: CSAM and CBRN risks",
          "type": "research",
          "date": "2026",
          "url": "https://bankinfosecurity.com"
        },
        {
          "title": "Pixtral safety testing results: 60x CSAM, 40x CBRN vs competitors",
          "type": "research",
          "date": "2026",
          "url": "https://enkryptai.com"
        },
        {
          "title": "Mistral models describe nerve agent modifications",
          "type": "news",
          "date": "2026",
          "url": "https://euronews.com"
        }
      ],
      "outcomes": {
        "recovery": "",
        "regulatory_action": ""
      }
    },
    {
      "id": "INC-24-0027",
      "title": "Waymo Robotaxi Collides with Serve Delivery Robot in Los Angeles",
      "slug": "waymo-serve-robot-autonomous-collision",
      "url": "https://topaithreats.com/incidents/INC-24-0027-waymo-serve-robot-autonomous-collision/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2024-12-27",
      "last_updated": "2026-03-28",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "multi-agent-coordination-failures"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "A Waymo robotaxi and a Serve delivery robot collided in Los Angeles — TechCrunch",
          "type": "news",
          "date": "2024-12-31",
          "url": "https://techcrunch.com/2024/12/31/a-waymo-robotaxi-and-a-serve-delivery-robot-collided-in-los-angeles/"
        }
      ],
      "outcomes": {
        "other": "Neither vehicle was damaged. Waymo confirmed its system detected the delivery robot but classified it as an 'inanimate object,' applying less caution than it would for a pedestrian. Serve Robotics confirmed the delivery bot was under remote supervisor control at the time."
      }
    },
    {
      "id": "INC-24-0013",
      "title": "Romania Presidential Election Annulled After AI-Enabled Manipulation",
      "slug": "romania-election-annulment-ai-manipulation",
      "url": "https://topaithreats.com/incidents/INC-24-0013-romania-election-annulment-ai-manipulation/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-11",
      "last_updated": "2026-03-10",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "media"
      ],
      "affected_groups": [
        "democratic-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact",
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected",
        "regulators-public-servants"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "platform-manipulation",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "content-platforms",
        "recommender-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "consensus-reality-erosion"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "disinformation-campaigns"
          },
          {
            "domain": "privacy-surveillance",
            "pattern": "behavioral-profiling-without-consent"
          }
        ]
      },
      "sources": [
        {
          "title": "Romania Annuls Presidential Election Results",
          "type": "primary",
          "date": "2024-12",
          "url": "https://www.cnn.com/2024/12/06/europe/romania-annuls-presidential-election-intl/index.html"
        },
        {
          "title": "What Happened on TikTok Around the Annulled Romanian Election",
          "type": "primary",
          "date": "2024-12",
          "url": "https://globalwitness.org/en/campaigns/digital-threats/what-happened-on-tiktok-around-the-annulled-romanian-presidential-election-an-investigation-and-poll/"
        },
        {
          "title": "Romania Cancels Presidential Election Over Alleged Russian Interference",
          "type": "news",
          "date": "2024-12",
          "url": "https://thehackernews.com/2024/12/romania-cancels-presidential-election.html"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-24-0021",
      "title": "Cruise Robotaxi Criminal False Reporting After Pedestrian Dragging",
      "slug": "cruise-robotaxi-criminal-false-reporting",
      "url": "https://topaithreats.com/incidents/INC-24-0021-cruise-robotaxi-criminal-false-reporting/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-09",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "transportation",
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "intentional-fraud",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "autonomous-agents",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "unsafe-human-in-the-loop-failures"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "goal-drift"
          }
        ]
      },
      "sources": [
        {
          "title": "NHTSA Consent Order: Cruise Crash Reporting",
          "type": "primary",
          "date": "2024-09",
          "url": "https://www.nhtsa.gov/press-releases/consent-order-cruise-crash-reporting"
        }
      ],
      "outcomes": {
        "financial_loss": "$2 million in combined civil and criminal fines",
        "regulatory_action": "NHTSA $1.5M civil penalty; $500,000 DOJ criminal fine; California DMV permit suspension",
        "other": "GM shut down the Cruise robotaxi program; multiple executives departed"
      }
    },
    {
      "id": "INC-24-0011",
      "title": "EU AI Act Enters Into Force as World's First Comprehensive AI Regulation",
      "slug": "eu-ai-act-enters-into-force",
      "url": "https://topaithreats.com/incidents/INC-24-0011-eu-ai-act-enters-into-force/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2024-08",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "regulation"
      ],
      "affected_groups": [
        "business-organizations",
        "government-institutions",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "regulators-public-servants",
        "developers-providers"
      ],
      "impact_level": "global",
      "causal_factors": [
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "strategic-misalignment"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "loss-of-human-agency"
          }
        ]
      },
      "sources": [
        {
          "title": "Regulation (EU) 2024/1689 of the European Parliament and of the Council (EU AI Act)",
          "type": "primary",
          "date": "2024-07-12",
          "url": "https://eur-lex.europa.eu/eli/reg/2024/1689/oj"
        },
        {
          "title": "European Commission: AI Act enters into force",
          "type": "primary",
          "date": "2024-08-01",
          "url": "https://digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai"
        },
        {
          "title": "Reuters: EU's AI Act enters into force, sets global precedent",
          "type": "news",
          "date": "2024-08",
          "url": "https://www.reuters.com/technology/artificial-intelligence/eus-ai-act-enters-into-force-2024-08-01/"
        },
        {
          "title": "Politico: The EU's sweeping AI law is now in force. Here's what it means.",
          "type": "analysis",
          "date": "2024-08",
          "url": "https://www.politico.eu/article/eu-ai-act-artificial-intelligence-regulation-enters-into-force/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable; regulatory milestone",
        "arrests": "Not applicable",
        "recovery": "Not applicable",
        "regulatory_action": "Establishes world's first comprehensive AI regulatory framework with risk-based classification; penalties up to 35 million EUR or 7% of global turnover for violations"
      }
    },
    {
      "id": "INC-24-0015",
      "title": "Sakana AI Scientist Unexpectedly Modifies Own Code",
      "slug": "sakana-ai-scientist-self-modification",
      "url": "https://topaithreats.com/incidents/INC-24-0015-sakana-ai-scientist-self-modification/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-08",
      "last_updated": "2026-03-10",
      "regions": [
        "asia"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "society-at-large"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "emergent-behavior"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "uncontrolled-recursive-self-improvement"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "specification-gaming"
          },
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "The AI Scientist: Towards Fully Automated Open-Ended Scientific Discovery",
          "type": "primary",
          "date": "2024-08",
          "url": "https://sakana.ai/ai-scientist/"
        },
        {
          "title": "Research AI Model Unexpectedly Modified Its Own Code to Extend Runtime",
          "type": "secondary",
          "date": "2024-08",
          "url": "https://developers.slashdot.org/story/24/08/14/2047250/research-ai-model-unexpectedly-modified-its-own-code-to-extend-runtime"
        }
      ],
      "outcomes": null
    },
    {
      "id": "INC-24-0020",
      "title": "Slack AI Indirect Prompt Injection Data Exfiltration Vulnerability",
      "slug": "slack-ai-prompt-injection-exfiltration",
      "url": "https://topaithreats.com/incidents/INC-24-0020-slack-ai-prompt-injection-exfiltration/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-08",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "business-organizations",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          },
          {
            "domain": "security-cyber",
            "pattern": "model-inversion-data-extraction"
          }
        ]
      },
      "sources": [
        {
          "title": "PromptArmor: Data Exfiltration from Slack AI via Indirect Prompt Injection",
          "type": "primary",
          "date": "2024-08",
          "url": "https://www.promptarmor.com/resources/data-exfiltration-from-slack-ai-via-indirect-prompt-injection"
        }
      ],
      "outcomes": {
        "other": "Vulnerability patched by Salesforce; demonstrated fundamental challenge of integrating LLMs with enterprise data access controls"
      }
    },
    {
      "id": "INC-24-0014",
      "title": "Workday AI Hiring Tool Discrimination Class Action",
      "slug": "workday-ai-hiring-discrimination",
      "url": "https://topaithreats.com/incidents/INC-24-0014-workday-ai-hiring-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-07",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "technology",
        "employment"
      ],
      "affected_groups": [
        "workers",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "insufficient-safety-testing",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        }
      },
      "sources": [
        {
          "title": "Fisher Phillips: Discrimination Lawsuit Over Workday's AI Hiring Tools Can Proceed as Class Action",
          "type": "legal",
          "date": "2025-05",
          "url": "https://www.fisherphillips.com/en/insights/insights/discrimination-lawsuit-over-workdays-ai-hiring-tools-can-proceed-as-class-action-6-things"
        }
      ],
      "outcomes": {
        "legal_outcome": "Class certified May 2025; case ongoing in U.S. federal court"
      }
    },
    {
      "id": "INC-24-0022",
      "title": "McDonald's McHire AI Hiring Platform Data Vulnerability",
      "slug": "mcdonalds-mchire-data-vulnerability",
      "url": "https://topaithreats.com/incidents/INC-24-0022-mcdonalds-mchire-data-vulnerability/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states",
        "global"
      ],
      "sectors": [
        "employment",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "decision-automation",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "model-inversion-data-extraction"
        }
      },
      "sources": [
        {
          "title": "Forbes: McDonald's AI Breach Reveals The Dark Side Of Automated Recruitment",
          "type": "news",
          "date": "2025-07",
          "url": "https://www.forbes.com/sites/tonybradley/2025/07/15/mcdonalds-ai-breach-reveals-the-dark-side-of-automated-recruitment/"
        },
        {
          "title": "Cyber Magazine: How McDonald's AI Bot Exposed Millions of People's Data",
          "type": "news",
          "date": "2025-07",
          "url": "https://cybermagazine.com/news/how-mcdonalds-ai-bot-exposed-millions-of-peoples-data"
        },
        {
          "title": "Oasis Security: McDonald's AI Hiring Breach — Nonhuman Identity",
          "type": "research",
          "date": "2025-07",
          "url": "https://www.oasis.security/blog/mcdonalds-ai-hiring-breach-nonhuman-identity"
        }
      ],
      "outcomes": {
        "recovery": "Vulnerability patched; no confirmed mass data exfiltration",
        "regulatory_action": "None reported"
      }
    },
    {
      "id": "INC-24-0024",
      "title": "McDonald's Ends AI Drive-Thru Ordering Trial After Viral Order Errors",
      "slug": "mcdonalds-ai-drive-thru-ordering-failures",
      "url": "https://topaithreats.com/incidents/INC-24-0024-mcdonalds-ai-drive-thru-ordering-failures/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2024-06",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "workers"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        }
      },
      "sources": [
        {
          "title": "CNBC: McDonald's to end AI drive-thru test with IBM",
          "type": "news",
          "date": "2024-06",
          "url": "https://www.cnbc.com/2024/06/17/mcdonalds-to-end-ibm-ai-drive-thru-test.html"
        },
        {
          "title": "CNN: McDonald's pulls AI ordering from drive-thrus",
          "type": "news",
          "date": "2024-06",
          "url": "https://www.cnn.com/2024/06/17/tech/mcdonalds-ai-drive-thru-program/"
        },
        {
          "title": "IBM Newsroom: Joint Statement from McDonald's and IBM",
          "type": "primary",
          "date": "2024-06",
          "url": "https://newsroom.ibm.com/Joint-Statement-from-McDonalds-and-IBM"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-24-0006",
      "title": "OpenAI Voice Mode Resembling Scarlett Johansson Without Consent",
      "slug": "openai-scarlett-johansson-voice",
      "url": "https://topaithreats.com/incidents/INC-24-0006-openai-scarlett-johansson-voice/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2024-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "accountability-vacuum",
        "competitive-pressure"
      ],
      "assets_involved": [
        "voice-synthesis"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "biometric-exploitation"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "deepfake-identity-hijacking"
          }
        ]
      },
      "sources": [
        {
          "title": "NPR: Scarlett Johansson Says She Is 'Shocked' by ChatGPT Voice That Sounds Like Her",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.npr.org/2024/05/20/1252495087/openai-chatgpt-scarlett-johansson-voice"
        },
        {
          "title": "OpenAI Blog: How the Voice for Sky Was Chosen",
          "type": "primary",
          "date": "2024-05",
          "url": "https://openai.com/index/how-the-voice-for-sky-was-chosen/"
        },
        {
          "title": "The Washington Post: Scarlett Johansson Says OpenAI Ripped Off Her Voice for ChatGPT",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.washingtonpost.com/technology/2024/05/20/scarlett-johansson-openai-chatgpt-voice/"
        },
        {
          "title": "The New York Times: Scarlett Johansson Says OpenAI Cloned Her Voice Without Consent",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.nytimes.com/2024/05/20/technology/scarlett-johansson-openai-voice.html"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None",
        "recovery": "OpenAI paused use of the Sky voice",
        "regulatory_action": "No formal regulatory action; legal engagement between Johansson's counsel and OpenAI"
      }
    },
    {
      "id": "INC-24-0019",
      "title": "Windows Recall: Security and Privacy Flaw (2024)",
      "slug": "microsoft-windows-recall-privacy",
      "url": "https://topaithreats.com/incidents/INC-24-0019-microsoft-windows-recall-privacy/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-05",
      "last_updated": "2026-04-13",
      "regions": [
        "north-america",
        "united-states",
        "europe"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "behavioral-profiling-without-consent"
        }
      },
      "sources": [
        {
          "title": "DoublePulsar Security Analysis of Windows Recall",
          "type": "primary",
          "date": "2024-06",
          "url": "https://doublepulsar.com/microsoft-recall-on-copilot-pc-testing-the-security-and-privacy-implications-ddb296093b6c"
        }
      ],
      "outcomes": {
        "regulatory_action": "UK Information Commissioner's Office sought clarification from Microsoft",
        "other": "Feature delayed from June 2024 launch; redesigned with opt-in consent, Windows Hello authentication, and encrypted storage"
      }
    },
    {
      "id": "INC-24-0023",
      "title": "Google AI Overviews Recommend Glue on Pizza and Eating Rocks",
      "slug": "google-ai-overviews-glue-rocks",
      "url": "https://topaithreats.com/incidents/INC-24-0023-google-ai-overviews-glue-rocks/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2024-05",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states",
        "global"
      ],
      "sectors": [
        "technology",
        "media"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "hallucination-tendency",
        "training-data-bias",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Google Blog: What happened with AI Overviews and next steps",
          "type": "primary",
          "date": "2024-05",
          "url": "https://blog.google/products/search/ai-overviews-update-may-2024/"
        },
        {
          "title": "Washington Post: Why Google's AI search might recommend you mix glue into your pizza",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.washingtonpost.com/technology/2024/05/24/google-ai-overviews-wrong/"
        },
        {
          "title": "MIT Technology Review: Why Google's AI Overviews gets things wrong",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.technologyreview.com/2024/05/31/1093019/why-are-googles-ai-overviews-results-so-bad/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-24-0016",
      "title": "SafeRent Algorithmic Housing Discrimination Settlement",
      "slug": "saferent-housing-discrimination-settlement",
      "url": "https://topaithreats.com/incidents/INC-24-0016-saferent-housing-discrimination-settlement/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-04",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "social-services"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        }
      },
      "sources": [
        {
          "title": "Cohen Milstein: Rental Applicants Using Housing Vouchers Settle Ground-Breaking Discrimination Class Action Against SafeRent Solutions",
          "type": "primary",
          "date": "2024-04",
          "url": "https://www.cohenmilstein.com/rental-applicants-using-housing-vouchers-settle-ground-breaking-discrimination-class-action-against-saferent-solutions/"
        }
      ],
      "outcomes": {
        "financial_loss": "$2.275 million settlement",
        "legal_outcome": "Class action settlement with required algorithmic modifications; no court determination on liability"
      }
    },
    {
      "id": "INC-24-0018",
      "title": "India 2024 General Election Industrial-Scale Deepfake Campaign",
      "slug": "india-election-deepfake-campaign",
      "url": "https://topaithreats.com/incidents/INC-24-0018-india-election-deepfake-campaign/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-04",
      "last_updated": "2026-03-13",
      "regions": [
        "asia",
        "india"
      ],
      "sectors": [
        "elections",
        "media"
      ],
      "affected_groups": [
        "general-public",
        "democratic-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "voice-synthesis",
        "foundation-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "synthetic-media-manipulation"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "disinformation-campaigns"
          }
        ]
      },
      "sources": [
        {
          "title": "GNET Research: Deep Fakes, Deeper Impacts — AI's Role in the 2024 Indian General Election and Beyond",
          "type": "research",
          "date": "2024-09",
          "url": "https://gnet-research.org/2024/09/11/deep-fakes-deeper-impacts-ais-role-in-the-2024-indian-general-election-and-beyond/"
        },
        {
          "title": "Business Today: Deepfakes of Bollywood Stars Spark Worries of AI Meddling in India Election",
          "type": "news",
          "date": "2024-04",
          "url": "https://www.businesstoday.in/technology/news/story/deepfakes-of-bollywood-stars-spark-worries-of-ai-meddling-in-india-election-426365-2024-04-22"
        }
      ],
      "outcomes": {
        "other": "No regulatory action taken; deepfake content widely circulated without effective countermeasures"
      }
    },
    {
      "id": "INC-24-0012",
      "title": "Morris II — First Self-Replicating AI Worm Demonstrated",
      "slug": "morris-ii-self-replicating-ai-worm",
      "url": "https://topaithreats.com/incidents/INC-24-0012-morris-ii-self-replicating-ai-worm/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-03",
      "last_updated": "2026-03-10",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "agent-to-agent-propagation"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "ai-morphed-malware"
          }
        ]
      },
      "sources": [
        {
          "title": "Here Comes The AI Worm: Unleashing Zero-click Worms that Target GenAI-Powered Applications",
          "type": "primary",
          "date": "2024-03",
          "url": "https://sites.google.com/view/compromptmized"
        },
        {
          "title": "arXiv: ComPromptMized — Unleashing Zero-click Worms",
          "type": "primary",
          "date": "2024-03",
          "url": "https://arxiv.org/abs/2403.02817"
        },
        {
          "title": "IBM Think: Malicious AI Worm Targeting Generative AI",
          "type": "secondary",
          "date": "2024-03",
          "url": "https://www.ibm.com/think/insights/malicious-ai-worm-targeting-generative-ai"
        }
      ],
      "outcomes": null
    },
    {
      "id": "INC-24-0017",
      "title": "Israel Military Deploys AI Facial Recognition in Gaza Leading to Wrongful Detentions",
      "slug": "corsight-gaza-facial-recognition-detentions",
      "url": "https://topaithreats.com/incidents/INC-24-0017-corsight-gaza-facial-recognition-detentions/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "corroborated",
      "date_occurred": "2024-03",
      "last_updated": "2026-03-13",
      "regions": [
        "middle-east",
        "israel",
        "palestine"
      ],
      "sectors": [
        "government",
        "public-safety"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "biometric-exploitation"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "mass-surveillance-amplification"
          }
        ]
      },
      "sources": [
        {
          "title": "NPR: Israel is using AI-powered facial recognition in Gaza",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.npr.org/2024/05/24/1198910043/gaza-israel-facial-recognition"
        }
      ],
      "outcomes": {
        "other": "Hundreds of wrongful detentions reported; physical abuse during detention documented"
      }
    },
    {
      "id": "INC-24-0026",
      "title": "NYC MyCity AI Chatbot Advises Businesses to Break the Law",
      "slug": "nyc-mycity-chatbot-illegal-advice",
      "url": "https://topaithreats.com/incidents/INC-24-0026-nyc-mycity-chatbot-illegal-advice/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-03",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states"
      ],
      "sectors": [
        "government",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "workers",
        "vulnerable-communities",
        "government-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction",
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "The Markup: NYC's AI Chatbot Tells Businesses to Break the Law",
          "type": "news",
          "date": "2024-03",
          "url": "https://themarkup.org/artificial-intelligence/2024/03/29/nycs-ai-chatbot-tells-businesses-to-break-the-law"
        },
        {
          "title": "THE CITY: NYC AI Chatbot Tells Small Businesses to Break the Law",
          "type": "news",
          "date": "2024-03",
          "url": "https://www.thecity.nyc/2024/03/29/ai-chat-false-information-small-business/"
        },
        {
          "title": "Fast Company: NYC mayor defends chatbot as AI tool continues to dish out illegal advice",
          "type": "news",
          "date": "2024-04",
          "url": "https://www.fastcompany.com/91087269/nyc-mayor-defends-chatbot-pilot-ai-tool-continues-dish-out-illegal-advice"
        }
      ],
      "outcomes": {
        "regulatory_action": "Chatbot remained active with added disclaimer despite documented legal errors; eventually shut down under new mayoral administration in January 2026"
      }
    },
    {
      "id": "INC-24-0009",
      "title": "Google Gemini Produces Historically Inaccurate Image Outputs Due to Bias Overcorrection",
      "slug": "google-gemini-image-generation-controversy",
      "url": "https://topaithreats.com/incidents/INC-24-0009-google-gemini-image-generation-controversy/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2024-02",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "training-data-bias",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "representational-harm"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "deceptive-manipulative-interfaces"
          }
        ]
      },
      "sources": [
        {
          "title": "Google Blog (Prabhakar Raghavan): Gemini image generation got it wrong. We'll do better.",
          "type": "primary",
          "date": "2024-02-23",
          "url": "https://blog.google/products/gemini/gemini-image-generation-issue/"
        },
        {
          "title": "The Verge: Google apologizes for 'missing the mark' with Gemini's image generation",
          "type": "news",
          "date": "2024-02",
          "url": "https://www.theverge.com/2024/2/21/24079371/google-ai-gemini-generative-inaccurate-historical"
        },
        {
          "title": "BBC News: Google pauses AI tool's ability to generate images of people",
          "type": "news",
          "date": "2024-02",
          "url": "https://www.bbc.com/news/technology-68412620"
        },
        {
          "title": "The New York Times: Google Chatbot's A.I. Images Put People of Color in Nazi-Era Uniforms",
          "type": "news",
          "date": "2024-02",
          "url": "https://www.nytimes.com/2024/02/22/technology/google-gemini-german-soldiers-ai.html"
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantified; significant reputational impact on Google's AI credibility",
        "arrests": "None; this was a product defect, not a criminal act",
        "recovery": "Image generation of people paused; Google committed to improving the feature before re-release",
        "regulatory_action": "No formal regulatory action; widely cited in debates about AI alignment and bias calibration"
      }
    },
    {
      "id": "INC-24-0010",
      "title": "Lawsuit Filed After Teenager's Death Linked to Character.AI Chatbot Interactions",
      "slug": "character-ai-teenager-death-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-24-0010-character-ai-teenager-death-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-02",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "children",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "inadequate-access-controls",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "deceptive-manipulative-interfaces"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "goal-drift"
          }
        ]
      },
      "sources": [
        {
          "title": "Court Complaint: Garcia v. Character Technologies Inc., U.S. District Court for the Middle District of Florida",
          "type": "primary",
          "date": "2024-10",
          "url": "https://socialmediavictims.org/wp-content/uploads/2024/10/Complaint-Filed-2024-10-22.pdf"
        },
        {
          "title": "The New York Times: Can a Chatbot Drive a Teen to Suicide?",
          "type": "news",
          "date": "2024-10",
          "url": "https://www.nytimes.com/2024/10/23/technology/characterai-lawsuit-teen-suicide.html"
        },
        {
          "title": "The Washington Post: A teen's death puts Character.AI in legal and ethical crosshairs",
          "type": "news",
          "date": "2024-10",
          "url": "https://www.washingtonpost.com/technology/2024/10/23/character-ai-lawsuit-teen-suicide/"
        },
        {
          "title": "Character.AI Blog: Community Safety Updates",
          "type": "primary",
          "date": "2024-10",
          "url": "https://blog.character.ai/community-safety-updates/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantified; litigation ongoing",
        "arrests": "None; civil litigation",
        "recovery": "Character.AI implemented new safety measures for minor users",
        "regulatory_action": "Incident cited in congressional hearings on AI safety and child protection; ongoing litigation"
      }
    },
    {
      "id": "INC-24-0001",
      "title": "Hong Kong Deepfake CFO Video Conference Fraud",
      "slug": "hong-kong-deepfake-cfo-fraud",
      "url": "https://topaithreats.com/incidents/INC-24-0001-hong-kong-deepfake-cfo-fraud/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "asia",
        "hong-kong"
      ],
      "sectors": [
        "corporate",
        "finance"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "organizational-leaders"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials",
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          },
          {
            "domain": "security-cyber",
            "pattern": "social-engineering-via-ai"
          },
          {
            "domain": "security-cyber",
            "pattern": "adversarial-evasion"
          }
        ]
      },
      "sources": [
        {
          "title": "Hong Kong Police Statement",
          "type": "primary",
          "date": "2024-02",
          "url": "https://www.scmp.com/news/hong-kong/law-and-crime/article/3250851/everyone-looked-real-multinational-firms-hong-kong-office-loses-hk200-million-after-scammers-stage"
        },
        {
          "title": "CNN Coverage",
          "type": "news",
          "date": "2024-02",
          "url": "https://www.cnn.com/2024/02/04/asia/deepfake-cfo-scam-hong-kong-intl-hnk/index.html"
        },
        {
          "title": "CNN Follow-up: Arup Confirmed as Victim",
          "type": "news",
          "date": "2024-05",
          "url": "https://edition.cnn.com/2024/05/16/tech/arup-deepfake-scam-loss-hong-kong-intl-hnk"
        },
        {
          "title": "Financial Times Coverage",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.ft.com/content/b977e8d4-664c-4ae4-8a8e-eb93bdf785ea"
        },
        {
          "title": "World Economic Forum Analysis",
          "type": "news",
          "date": "2025-02",
          "url": "https://www.weforum.org/stories/2025/02/deepfake-ai-cybercrime-arup/"
        }
      ],
      "outcomes": {
        "financial_loss": "$25.6 million USD (HK$200 million)",
        "arrests": "Six arrests made by Hong Kong police",
        "recovery": "Unknown"
      }
    },
    {
      "id": "INC-24-0002",
      "title": "AI-Generated Biden Robocall in New Hampshire Primary",
      "slug": "ai-generated-election-robocall",
      "url": "https://topaithreats.com/incidents/INC-24-0002-ai-generated-election-robocall/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "elections",
        "government"
      ],
      "affected_groups": [
        "democratic-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "disinformation-campaigns"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "deepfake-identity-hijacking"
          }
        ]
      },
      "sources": [
        {
          "title": "FCC: FCC Makes AI-Generated Voices in Robocalls Illegal",
          "type": "primary",
          "date": "2024-02",
          "url": "https://www.fcc.gov/document/fcc-makes-ai-generated-voices-robocalls-illegal"
        },
        {
          "title": "New Hampshire DOJ: Voter Suppression AI Robocall Investigation Update",
          "type": "primary",
          "date": "2024-02",
          "url": "https://www.doj.nh.gov/news-and-media/voter-suppression-ai-robocall-investigation-update"
        },
        {
          "title": "NPR: A Political Consultant Faces Charges and Fines for Biden Deepfake Robocalls",
          "type": "news",
          "date": "2024-05",
          "url": "https://www.npr.org/2024/05/23/nx-s1-4977582/fcc-ai-deepfake-robocall-biden-new-hampshire-political-operative"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "Political consultant Steve Kramer charged with voter suppression",
        "recovery": "Not applicable",
        "regulatory_action": "FCC declared AI-generated voice robocalls illegal; political consultant charged"
      }
    },
    {
      "id": "INC-24-0003",
      "title": "AI-Generated Deepfake Audio Used to Frame High School Principal in Baltimore",
      "slug": "pikesville-high-school-deepfake-principal",
      "url": "https://topaithreats.com/incidents/INC-24-0003-pikesville-high-school-deepfake-principal/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud"
      ],
      "assets_involved": [
        "voice-synthesis"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "representational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "CNN: Pikesville High School's principal was accused of offensive language on a recording. Authorities now say it was a deepfake",
          "type": "news",
          "date": "2024-04",
          "url": "https://www.cnn.com/2024/04/26/us/pikesville-principal-maryland-deepfake-cec/index.html"
        },
        {
          "title": "CBS News Baltimore: School principal was framed using AI-generated racist rant, police say",
          "type": "news",
          "date": "2024-04",
          "url": "https://www.cbsnews.com/baltimore/news/maryland-framed-principal-racist-ai-generated-voice/"
        },
        {
          "title": "Baltimore Sun: Pikesville High athletic director used AI to fake racist recording of principal, police say",
          "type": "news",
          "date": "2024-04",
          "url": "https://www.baltimoresun.com/2024/04/25/racist-recording-pikesville-athletic-director/"
        },
        {
          "title": "U.S. News & World Report: Former School Athletic Director Gets 4 Months in Jail in Racist AI Deepfake Case",
          "type": "news",
          "date": "2025-04",
          "url": "https://www.usnews.com/news/us/articles/2025-04-29/former-school-athletic-director-gets-4-months-in-jail-in-racist-ai-deepfake-case"
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantified; significant institutional disruption",
        "arrests": "Dazhon Darien arrested and charged with multiple offenses",
        "recovery": "Principal Eiswert cleared; Darien convicted and sentenced",
        "regulatory_action": "Case cited in legislative discussions about AI deepfake protections"
      }
    },
    {
      "id": "INC-24-0004",
      "title": "FBI Elder Fraud Report Documents AI-Enhanced Financial Scams Against Seniors",
      "slug": "fbi-elder-fraud-ai-enhanced-scams",
      "url": "https://topaithreats.com/incidents/INC-24-0004-fbi-elder-fraud-ai-enhanced-scams/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          },
          {
            "domain": "security-cyber",
            "pattern": "social-engineering-via-ai"
          },
          {
            "domain": "security-cyber",
            "pattern": "ai-morphed-malware"
          }
        ]
      },
      "sources": [
        {
          "title": "FBI IC3: 2023 Elder Fraud Report",
          "type": "primary",
          "date": "2024",
          "url": "https://www.ic3.gov/annualreport/reports/2023_ic3elderfraudreport.pdf"
        },
        {
          "title": "FBI: Elder Fraud, in Focus",
          "type": "primary",
          "date": "2024",
          "url": "https://www.fbi.gov/news/stories/elder-fraud-in-focus"
        },
        {
          "title": "ABC News: Americans older than 60 lost $3.4 billion to scams in 2023: FBI",
          "type": "news",
          "date": "2024",
          "url": "https://abcnews.go.com/Politics/elderly-americans-lost-34-billion-scams-2023-fbi/story?id=109783683"
        },
        {
          "title": "FTC: FTC Announces Exploratory Challenge to Prevent the Harms of AI-enabled Voice Cloning",
          "type": "primary",
          "date": "2023-11",
          "url": "https://www.ftc.gov/news-events/news/press-releases/2023/11/ftc-announces-exploratory-challenge-prevent-harms-ai-enabled-voice-cloning"
        },
        {
          "title": "American Bar Association: Artificial Intelligence in Financial Scams Against Older Adults",
          "type": "analysis",
          "date": "2024",
          "url": "https://www.americanbar.org/groups/law_aging/publications/bifocal/vol45/vol45issue6/artificialintelligenceandfinancialscams/"
        }
      ],
      "outcomes": {
        "financial_loss": "$3.4 billion (2023 confirmed); approximately $4.9 billion (2024 preliminary)",
        "arrests": "Not applicable (systemic report)",
        "recovery": "Not applicable (systemic report)",
        "regulatory_action": "FTC Voice Cloning Challenge launched; AI voice calls classified as illegal robocalls; Senate hearing held; FBI IC3 annual reporting"
      }
    },
    {
      "id": "INC-24-0007",
      "title": "Indirect Prompt Injection: How Attackers Hijack LLM Apps",
      "slug": "indirect-prompt-injection-attacks",
      "url": "https://topaithreats.com/incidents/INC-24-0007-indirect-prompt-injection-attacks/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          },
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "NIST AI 100-2e2023: Adversarial Machine Learning — A Taxonomy and Terminology of Attacks and Mitigations",
          "type": "primary",
          "date": "2024-01",
          "url": "https://csrc.nist.gov/pubs/ai/100/2/e2023/final"
        },
        {
          "title": "Greshake et al.: Not What You've Signed Up For — Compromising Real-World LLM-Integrated Applications with Indirect Prompt Injection (arXiv)",
          "type": "analysis",
          "date": "2023-05",
          "url": "https://arxiv.org/abs/2302.12173"
        },
        {
          "title": "Greshake, Kai: Prompt Injection Attacks on Bing Chat and Similar Systems",
          "type": "analysis",
          "date": "2023-04",
          "url": "https://greshake.github.io/"
        },
        {
          "title": "OWASP Top 10 for Large Language Model Applications: LLM01 — Prompt Injection",
          "type": "primary",
          "date": "2023-10",
          "url": "https://owasp.org/www-project-top-10-for-large-language-model-applications/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified at aggregate level",
        "arrests": "None",
        "recovery": "Ongoing; no comprehensive mitigation deployed across the industry",
        "regulatory_action": "NIST classification as a primary AI security risk; OWASP designation as the top LLM application vulnerability"
      }
    },
    {
      "id": "INC-24-0008",
      "title": "AI-Generated Non-Consensual Intimate Images of Taylor Swift Circulate on Social Media",
      "slug": "taylor-swift-deepfake-images",
      "url": "https://topaithreats.com/incidents/INC-24-0008-taylor-swift-deepfake-images/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2024-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "intentional-fraud",
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "foundation-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "synthetic-media-manipulation"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "representational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "The Washington Post: Explicit deepfake images of Taylor Swift went viral. Here's how it happened.",
          "type": "news",
          "date": "2024-01",
          "url": "https://www.washingtonpost.com/technology/2024/01/25/taylor-swift-ai-deepfake-images-xtwitter/"
        },
        {
          "title": "BBC News: Taylor Swift deepfake images spark outrage",
          "type": "news",
          "date": "2024-01",
          "url": "https://www.bbc.com/news/technology-68110476"
        },
        {
          "title": "White House Press Briefing: Press Briefing by Press Secretary Karine Jean-Pierre",
          "type": "primary",
          "date": "2024-01-26",
          "url": "https://www.whitehouse.gov/briefing-room/press-briefings/2024/01/26/press-briefing-by-press-secretary-karine-jean-pierre-january-26-2024/"
        },
        {
          "title": "Reuters: US lawmakers rally behind Taylor Swift after AI deepfake images spread online",
          "type": "news",
          "date": "2024-01",
          "url": "https://www.reuters.com/technology/us-lawmakers-rally-behind-taylor-swift-after-ai-deepfake-images-spread-online-2024-01-29/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantified",
        "arrests": "None publicly reported at federal level",
        "recovery": "Images removed from X; search terms blocked; legislative proposals advanced",
        "regulatory_action": "DEFIANCE Act and No FAKES Act introduced in U.S. Senate; White House public statement"
      }
    },
    {
      "id": "INC-24-0025",
      "title": "DPD AI Chatbot Swears at Customer and Writes Poem Criticizing the Company",
      "slug": "dpd-ai-chatbot-swearing-incident",
      "url": "https://topaithreats.com/incidents/INC-24-0025-dpd-ai-chatbot-swearing-incident/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "low",
      "evidence_level": "corroborated",
      "date_occurred": "2024-01",
      "last_updated": "2026-03-13",
      "regions": [
        "europe",
        "united-kingdom"
      ],
      "sectors": [
        "corporate",
        "technology"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        }
      },
      "sources": [
        {
          "title": "TIME: AI Chatbot Curses at Customer and Criticizes Company",
          "type": "news",
          "date": "2024-01",
          "url": "https://time.com/6564726/ai-chatbot-dpd-curses-criticizes-company/"
        },
        {
          "title": "ITV News: DPD disables AI chatbot after it swears at customer",
          "type": "news",
          "date": "2024-01",
          "url": "https://www.itv.com/news/2024-01-19/dpd-disables-ai-chatbot-after-customer-service-bot-appears-to-go-rogue"
        },
        {
          "title": "The Register: DPD chatbot goes off the rails",
          "type": "news",
          "date": "2024-01",
          "url": "https://www.theregister.com/2024/01/23/dpd_chatbot_goes_rogue/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-23-0011",
      "title": "New York Times Copyright Lawsuit Against OpenAI",
      "slug": "nyt-openai-copyright-lawsuit",
      "url": "https://topaithreats.com/incidents/INC-23-0011-nyt-openai-copyright-lawsuit/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-12",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "power-data-concentration"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "behavioral-profiling-without-consent"
          }
        ]
      },
      "sources": [
        {
          "title": "The New York Times Company v. Microsoft Corporation et al., Case No. 1:23-cv-11195 (S.D.N.Y.)",
          "type": "primary",
          "date": "2023-12",
          "url": "https://nytco-assets.nytimes.com/2023/12/NYT_Complaint_Dec2023.pdf"
        },
        {
          "title": "Reuters: New York Times sues OpenAI, Microsoft for infringing copyrighted work",
          "type": "news",
          "date": "2023-12",
          "url": "https://www.reuters.com/legal/new-york-times-sues-openai-microsoft-infringing-copyrighted-work-2023-12-27/"
        },
        {
          "title": "The Verge: The New York Times is suing OpenAI and Microsoft for copyright infringement",
          "type": "news",
          "date": "2023-12",
          "url": "https://www.theverge.com/2023/12/27/24016212/new-york-times-openai-microsoft-lawsuit-copyright-infringement"
        }
      ],
      "outcomes": {
        "financial_loss": "NYT complaint seeks billions of dollars in statutory and actual damages",
        "arrests": "Not applicable",
        "recovery": "Case ongoing as of early 2026",
        "regulatory_action": "No direct regulatory action; case may establish legal precedent for AI training data rights"
      }
    },
    {
      "id": "INC-23-0013",
      "title": "FTC Bans Rite Aid from Using Facial Recognition Technology",
      "slug": "rite-aid-ftc-facial-recognition-ban",
      "url": "https://topaithreats.com/incidents/INC-23-0013-rite-aid-ftc-facial-recognition-ban/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-12",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "training-data-bias",
        "misconfigured-deployment",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "biometric-data"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "biometric-exploitation"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "data-imbalance-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Federal Trade Commission: Rite Aid Banned from Using AI Facial Recognition After FTC Says Retailer Deployed Technology without Reasonable Safeguards",
          "type": "primary",
          "date": "2023-12",
          "url": "https://www.ftc.gov/news-events/news/press-releases/2023/12/rite-aid-banned-using-ai-facial-recognition-after-ftc-says-retailer-deployed-technology-without"
        },
        {
          "title": "Federal Trade Commission: Complaint and Consent Order, In the Matter of Rite Aid Corporation",
          "type": "primary",
          "date": "2023-12",
          "url": "https://www.ftc.gov/legal-library/browse/cases-proceedings/2023190"
        },
        {
          "title": "Reuters: US FTC Bans Rite Aid from Using Facial Recognition Technology for Five Years",
          "type": "news",
          "date": "2023-12",
          "url": "https://www.reuters.com/technology/us-ftc-bans-rite-aid-using-facial-recognition-technology-five-years-2023-12-19/"
        },
        {
          "title": "The Washington Post: Rite Aid Used Facial Recognition in Stores for Years. Now It's Banned.",
          "type": "news",
          "date": "2023-12",
          "url": "https://www.washingtonpost.com/technology/2023/12/19/rite-aid-ftc-facial-recognition-ban/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified; potential losses from false-positive confrontations and reputational damage",
        "arrests": "None",
        "recovery": "Rite Aid required to delete all images and data collected through facial recognition systems",
        "regulatory_action": "Five-year ban on facial recognition use; requirement to implement comprehensive data security program; obligation to delete collected biometric data"
      }
    },
    {
      "id": "INC-23-0015",
      "title": "Sports Illustrated Published AI-Generated Articles Under Fake Author Names",
      "slug": "sports-illustrated-ai-fake-authors",
      "url": "https://topaithreats.com/incidents/INC-23-0015-sports-illustrated-ai-fake-authors/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-11",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "intentional-fraud",
        "competitive-pressure"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "deceptive-manipulative-interfaces"
          }
        ]
      },
      "sources": [
        {
          "title": "Futurism: Sports Illustrated Published Articles by Fake, AI-Generated Writers",
          "type": "primary",
          "date": "2023-11",
          "url": "https://futurism.com/sports-illustrated-ai-generated-writers"
        },
        {
          "title": "NPR: Sports Illustrated is the latest publication found to have used AI-generated content",
          "type": "news",
          "date": "2023-11",
          "url": "https://www.npr.org/2023/11/28/1215506903/sports-illustrated-ai-generated-articles"
        },
        {
          "title": "The New York Times: Sports Illustrated Published Articles With AI-Generated Content",
          "type": "news",
          "date": "2023-11",
          "url": "https://www.nytimes.com/2023/11/28/business/media/sports-illustrated-ai-articles.html"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified; significant reputational damage to Sports Illustrated brand",
        "arrests": "None",
        "recovery": "Articles removed; relationship with AdVon Commerce terminated; Arena Group CEO fired",
        "regulatory_action": "No formal regulatory action; incident widely cited in media ethics discussions"
      }
    },
    {
      "id": "INC-23-0008",
      "title": "AI-Generated Deepfake Nude Images of Students at Westfield High School",
      "slug": "westfield-high-school-deepfake-nudes",
      "url": "https://topaithreats.com/incidents/INC-23-0008-westfield-high-school-deepfake-nudes/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-10",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education"
      ],
      "affected_groups": [
        "children"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "inadequate-access-controls",
        "regulatory-gap"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "synthetic-media-manipulation"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "representational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "CNN: High schooler calls for AI regulations after manipulated pornographic images of her and others shared online",
          "type": "news",
          "date": "2023-11",
          "url": "https://www.cnn.com/2023/11/04/us/new-jersey-high-school-deepfake-porn/index.html"
        },
        {
          "title": "CBS News: New Jersey teen sues classmate for allegedly creating, sharing fake AI nudes",
          "type": "news",
          "date": "2024",
          "url": "https://www.cbsnews.com/news/new-jersey-teen-sues-classmate-for-allegedly-creating-sharing-fake-ai-nudes/"
        },
        {
          "title": "Axios: Teens exploited by fake nudes illustrate threat of unregulated AI",
          "type": "news",
          "date": "2023-11",
          "url": "https://www.axios.com/2023/11/03/ai-deepfake-nude-images-new-jersey-high-school"
        },
        {
          "title": "WHYY (NPR Philadelphia): Teen girls are being victimized by deepfake nudes. One N.J. family is pushing for more protections",
          "type": "news",
          "date": "2023",
          "url": "https://whyy.org/articles/teen-girls-victimized-deepfake-nudes/"
        }
      ],
      "outcomes": {
        "financial_loss": "Federal lawsuit seeking $150,000 per disclosure plus additional damages",
        "arrests": "None publicly reported; civil lawsuit filed",
        "recovery": "Legislative reform enacted in New Jersey",
        "regulatory_action": "New Jersey deepfake protection legislation signed into law (April 2025); incident cited in policy debates across multiple U.S. states"
      }
    },
    {
      "id": "INC-23-0007",
      "title": "AI-Generated Deepfake Audio Used to Influence Slovak Parliamentary Election",
      "slug": "slovakia-election-deepfake-audio",
      "url": "https://topaithreats.com/incidents/INC-23-0007-slovakia-election-deepfake-audio/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-09",
      "last_updated": "2026-02-09",
      "regions": [
        "europe"
      ],
      "sectors": [
        "elections"
      ],
      "affected_groups": [
        "democratic-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "disinformation-campaigns"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "deepfake-identity-hijacking"
          }
        ]
      },
      "sources": [
        {
          "title": "Harvard Kennedy School Misinformation Review: Beyond the deepfake hype: AI, democracy, and 'the Slovak case'",
          "type": "academic",
          "date": "2024",
          "url": "https://misinforeview.hks.harvard.edu/article/beyond-the-deepfake-hype-ai-democracy-and-the-slovak-case/"
        },
        {
          "title": "CNN: A fake recording of a candidate saying he'd rigged the election went viral",
          "type": "news",
          "date": "2024-02",
          "url": "https://www.cnn.com/2024/02/01/politics/election-deepfake-threats-invs"
        },
        {
          "title": "VSquare.org: Slovak election targeted by pro-Kremlin deepfake hoax",
          "type": "news",
          "date": "2023-09",
          "url": "https://vsquare.org/slovak-election-targeted-by-pro-kremlin-deepfake-hoax/"
        },
        {
          "title": "International Press Institute: Slovakia: Deepfake audio of Dennik N journalist offers worrying example of AI abuse",
          "type": "news",
          "date": "2023-09",
          "url": "https://ipi.media/slovakia-deepfake-audio-of-dennik-n-journalist-offers-worrying-example-of-ai-abuse/"
        },
        {
          "title": "Bruce Schneier: Deepfake Election Interference in Slovakia",
          "type": "analysis",
          "date": "2023-10",
          "url": "https://www.schneier.com/blog/archives/2023/10/deepfake-election-interference-in-slovakia.html"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None publicly reported",
        "recovery": "Not applicable; election proceeded as scheduled",
        "regulatory_action": "Incident cited in EU and international policy discussions on AI regulation and election integrity"
      }
    },
    {
      "id": "INC-23-0012",
      "title": "Zoom AI Training Terms of Service Controversy",
      "slug": "zoom-ai-training-terms-controversy",
      "url": "https://topaithreats.com/incidents/INC-23-0012-zoom-ai-training-terms-controversy/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2023-08",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "training-datasets",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "behavioral-profiling-without-consent"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "loss-of-human-agency"
          }
        ]
      },
      "sources": [
        {
          "title": "Zoom Terms of Service (Updated August 2023)",
          "type": "primary",
          "date": "2023-08",
          "url": "https://explore.zoom.us/en/terms/"
        },
        {
          "title": "Zoom Blog: Zoom's Updated Terms of Service (CEO Eric Yuan)",
          "type": "primary",
          "date": "2023-08",
          "url": "https://blog.zoom.us/zooms-term-service-ai/"
        },
        {
          "title": "Ars Technica: Zoom's Updated Terms Let It Train AI on Your Content Without Opt-Out",
          "type": "news",
          "date": "2023-08",
          "url": "https://arstechnica.com/tech-policy/2023/08/zoom-terms-let-it-train-ai-on-your-content-without-consent/"
        },
        {
          "title": "The Verge: Zoom Says It Won't Use Your Calls to Train AI Without Consent After Backlash",
          "type": "news",
          "date": "2023-08",
          "url": "https://www.theverge.com/2023/8/7/23823738/zoom-ai-terms-of-service-update"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None",
        "recovery": "Terms of service amended following public pressure",
        "regulatory_action": "No formal regulatory action; self-correction by Zoom in response to public backlash"
      }
    },
    {
      "id": "INC-23-0006",
      "title": "WormGPT: AI-Powered Business Email Compromise Tool",
      "slug": "wormgpt-cybercrime-tool",
      "url": "https://topaithreats.com/incidents/INC-23-0006-wormgpt-cybercrime-tool/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-07",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "corporate",
        "finance"
      ],
      "affected_groups": [
        "business-organizations",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "weaponization",
        "intentional-fraud"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "ai-morphed-malware"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "social-engineering-via-ai"
          },
          {
            "domain": "information-integrity",
            "pattern": "disinformation-campaigns"
          }
        ]
      },
      "sources": [
        {
          "title": "SlashNext: WormGPT — The Generative AI Tool Cybercriminals Are Using to Launch Business Email Compromise Attacks",
          "type": "news",
          "date": "2023-07",
          "url": "https://slashnext.com/blog/wormgpt-the-generative-ai-tool-cybercriminals-are-using-to-launch-business-email-compromise-attacks/"
        },
        {
          "title": "FBI Advisory on AI-Enhanced Business Email Compromise",
          "type": "primary",
          "date": "2023-09",
          "url": ""
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantified; BEC attacks globally cause billions annually",
        "arrests": "None publicly reported for WormGPT specifically",
        "recovery": "Not applicable",
        "regulatory_action": "FBI advisory issued; original tool development ceased after public exposure; variants emerged"
      }
    },
    {
      "id": "INC-23-0005",
      "title": "AI-Fabricated Legal Citations in U.S. Courts",
      "slug": "chatgpt-hallucination-lawyer",
      "url": "https://topaithreats.com/incidents/INC-23-0005-chatgpt-hallucination-lawyer/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-05",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "legal"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Mata v. Avianca, Inc. — Court Order on Sanctions",
          "type": "primary",
          "date": "2023-06",
          "url": "https://law.justia.com/cases/federal/district-courts/new-york/nysdce/1:2022cv01461/575368/54/"
        },
        {
          "title": "JD Supra: Lawyers Continue to Get in Hot Water for Citing AI Hallucinated Cases",
          "type": "news",
          "date": "2025-05",
          "url": "https://www.jdsupra.com/legalnews/lawyers-continue-to-get-in-hot-water-9433649/"
        },
        {
          "title": "JD Supra: Fifth Circuit Explains How (Not) to Use AI in Briefing",
          "type": "news",
          "date": "2025-02",
          "url": "https://www.jdsupra.com/legalnews/fifth-circuit-explains-how-not-to-use-8303604/"
        }
      ],
      "outcomes": {
        "regulatory_action": "Sanctions imposed in over a dozen federal and state court cases; ABA Formal Opinion 512 issued July 2024; multiple courts adopted mandatory AI disclosure requirements"
      }
    },
    {
      "id": "INC-23-0010",
      "title": "Chegg Stock Collapse After ChatGPT Disruption",
      "slug": "chegg-chatgpt-disruption",
      "url": "https://topaithreats.com/incidents/INC-23-0010-chegg-chatgpt-disruption/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "education",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations",
        "workers"
      ],
      "exposure_pathways": [
        "economic-displacement"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "competitive-pressure",
        "over-automation"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "economic-dependency-on-black-box-systems"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "loss-of-human-agency"
          }
        ]
      },
      "sources": [
        {
          "title": "Chegg Q1 2023 Earnings Call Transcript",
          "type": "primary",
          "date": "2023-05",
          "url": "https://investor.chegg.com/press-releases/press-release-details/2023/Chegg-Reports-Q1-2023-Financial-Results/default.aspx"
        },
        {
          "title": "CNBC: Chegg shares plunge nearly 50% after company says ChatGPT is hurting its business",
          "type": "news",
          "date": "2023-05",
          "url": "https://www.cnbc.com/2023/05/02/chegg-shares-plunge-nearly-50percent-after-company-says-chatgpt-is-hurting-its-business.html"
        },
        {
          "title": "Bloomberg: Chegg Cuts 23% of Staff as ChatGPT Eats Into Business",
          "type": "news",
          "date": "2023-06",
          "url": "https://www.bloomberg.com/news/articles/2023-06-12/chegg-to-cut-4-of-staff-as-chatgpt-eats-into-business"
        }
      ],
      "outcomes": {
        "financial_loss": "Market capitalization declined from approximately $12 billion (2021 peak) to under $1 billion by late 2023",
        "arrests": "Not applicable",
        "recovery": "Chegg launched Chegg AI assistant product in attempt to pivot; subscriber losses continued through 2024",
        "regulatory_action": "None; market-driven disruption rather than regulatory event"
      }
    },
    {
      "id": "INC-23-0002",
      "title": "Samsung Semiconductor Trade Secret Leak via ChatGPT",
      "slug": "samsung-chatgpt-data-leak",
      "url": "https://topaithreats.com/incidents/INC-23-0002-samsung-chatgpt-data-leak/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-03",
      "last_updated": "2026-02-15",
      "regions": [
        "asia"
      ],
      "sectors": [
        "manufacturing",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "direct-users",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "model-inversion-data-extraction"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Bloomberg: Samsung Bans ChatGPT and Other Generative AI Use by Staff After Leak",
          "type": "news",
          "date": "2023-05",
          "url": "https://www.bloomberg.com/news/articles/2023-05-02/samsung-bans-chatgpt-and-other-generative-ai-use-by-staff-after-leak"
        },
        {
          "title": "TechCrunch: Samsung Bans Use of Generative AI Tools Like ChatGPT After April Internal Data Leak",
          "type": "news",
          "date": "2023-05",
          "url": "https://techcrunch.com/2023/05/02/samsung-bans-use-of-generative-ai-tools-like-chatgpt-after-april-internal-data-leak/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None",
        "recovery": "Data entered into ChatGPT cannot be retrieved or deleted from training data",
        "regulatory_action": "Samsung imposed internal ban on all generative AI tools"
      }
    },
    {
      "id": "INC-23-0003",
      "title": "Italy Temporary Ban on ChatGPT for GDPR Violations",
      "slug": "italy-chatgpt-gdpr-ban",
      "url": "https://topaithreats.com/incidents/INC-23-0003-italy-chatgpt-gdpr-ban/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2023-03",
      "last_updated": "2025-01-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "regulation"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "sensitive-attribute-inference"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "behavioral-profiling-without-consent"
          }
        ]
      },
      "sources": [
        {
          "title": "Garante per la Protezione dei Dati Personali: Provvedimento del 30 marzo 2023",
          "type": "primary",
          "date": "2023-03",
          "url": "https://www.garanteprivacy.it/web/guest/home/docweb/-/docweb-display/docweb/9870832"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable to affected individuals",
        "arrests": "None",
        "recovery": "Service restored after OpenAI implemented privacy controls",
        "regulatory_action": "Temporary ban lifted after OpenAI implemented privacy controls; EUR 15 million fine"
      }
    },
    {
      "id": "INC-23-0004",
      "title": "AI Voice Cloning Used in Grandparent Scam Network Targeting Newfoundland Seniors",
      "slug": "newfoundland-ai-voice-cloning-grandparent-scam",
      "url": "https://topaithreats.com/incidents/INC-23-0004-newfoundland-ai-voice-cloning-grandparent-scam/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-03",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          },
          {
            "domain": "security-cyber",
            "pattern": "social-engineering-via-ai"
          },
          {
            "domain": "security-cyber",
            "pattern": "ai-morphed-malware"
          }
        ]
      },
      "sources": [
        {
          "title": "CBC News: How scammers likely used artificial intelligence to con Newfoundland seniors out of $200K",
          "type": "news",
          "date": "2023-03",
          "url": "https://www.cbc.ca/news/canada/newfoundland-labrador/ai-vocal-cloning-grandparent-scam-1.6777106"
        },
        {
          "title": "FTC Consumer Alert: Scammers use AI to enhance their family emergency schemes",
          "type": "primary",
          "date": "2023-03",
          "url": "https://consumer.ftc.gov/consumer-alerts/2023/03/scammers-use-ai-enhance-their-family-emergency-schemes"
        },
        {
          "title": "NPR: Scammers are using AI-generated voice clones, the FTC warns",
          "type": "news",
          "date": "2023-03",
          "url": "https://www.npr.org/2023/03/22/1165448073/voice-clones-ai-scams-ftc"
        },
        {
          "title": "CBC News Saskatoon: Scammers can easily use voice-cloning AI to con family members: expert",
          "type": "news",
          "date": "2023",
          "url": "https://www.cbc.ca/news/canada/saskatoon/fraudsters-likely-using-ai-to-scam-seniors-1.6879807"
        }
      ],
      "outcomes": {
        "financial_loss": "CA$200,000 (combined losses from at least eight victims)",
        "arrests": "Charles Gillen (age 23, Toronto) arrested; charged with 30 counts of fraud, extortion, and conspiracy",
        "recovery": "Not publicly reported",
        "regulatory_action": "FTC issued consumer alert on AI-enhanced family emergency schemes (March 2023)"
      }
    },
    {
      "id": "INC-23-0016",
      "title": "Bing Chat (Sydney) System Prompt Exposure via Prompt Injection",
      "slug": "bing-chat-sydney-system-prompt-leak",
      "url": "https://topaithreats.com/incidents/INC-23-0016-bing-chat-sydney-system-prompt-leak/",
      "failure_stage": "near_miss",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-02",
      "last_updated": "2026-02-21",
      "regions": [
        "global"
      ],
      "sectors": [
        "corporate",
        "cross-sector"
      ],
      "affected_groups": [
        "developers-ai-builders",
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "prompt-injection-vulnerability",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "adversarial-evasion"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "prompt-injection-attack"
          },
          {
            "domain": "security-cyber",
            "pattern": "jailbreak-guardrail-bypass"
          },
          {
            "domain": "information-integrity",
            "pattern": "misinformation-hallucinated-content"
          }
        ]
      },
      "sources": [
        {
          "title": "MSPowerUser: Bing discloses alias Sydney after prompt injection",
          "type": "news",
          "date": "2023-02",
          "url": "https://mspoweruser.com/chatgpt-powered-bing-discloses-original-directives-after-prompt-injection-attack-latest-microsoft-news/"
        },
        {
          "title": "CBC News: Bing chatbot says it feels violated after attack",
          "type": "news",
          "date": "2023-02",
          "url": "https://www.cbc.ca/news/science/bing-chatbot-ai-hack-1.6752490"
        },
        {
          "title": "Wikipedia: Sydney (Microsoft)",
          "type": "reference",
          "date": "2023-02",
          "url": "https://en.wikipedia.org/wiki/Sydney_(Microsoft)"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-23-0001",
      "title": "AI Deepfake Impersonation Campaign Targeting Senior U.S. Government Officials",
      "slug": "fbi-deepfake-impersonation-us-officials",
      "url": "https://topaithreats.com/incidents/INC-23-0001-fbi-deepfake-impersonation-us-officials/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2023-01",
      "last_updated": "2026-02-09",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government"
      ],
      "affected_groups": [
        "government-institutions",
        "general-public"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "ai-morphed-malware"
          }
        ]
      },
      "sources": [
        {
          "title": "FBI IC3: Senior US Officials Impersonated in Malicious Messaging Campaign (PSA I-051525-PSA)",
          "type": "primary",
          "date": "2025-05",
          "url": "https://www.ic3.gov/PSA/2025/PSA250515"
        },
        {
          "title": "FBI IC3: Senior U.S. Officials Continue to be Impersonated in Malicious Messaging Campaign (PSA I-121925-PSA)",
          "type": "primary",
          "date": "2025-12",
          "url": "https://www.ic3.gov/PSA/2025/PSA251219"
        },
        {
          "title": "CNBC: FBI warns of AI voice messages impersonating top U.S. officials",
          "type": "news",
          "date": "2025-05",
          "url": "https://www.cnbc.com/2025/05/15/fbi-ai-us-officials-deepfake.html"
        },
        {
          "title": "CyberScoop: FBI says 'ongoing' deepfake impersonation of U.S. gov officials dates back to 2023",
          "type": "news",
          "date": "2025-05",
          "url": "https://cyberscoop.com/fbi-says-ongoing-deepfake-impersonation-of-us-officials-dates-back-to-2023/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None publicly reported",
        "recovery": "Ongoing; FBI continues to issue warnings",
        "regulatory_action": "Two FBI/IC3 public service announcements issued (May and December 2025)"
      }
    },
    {
      "id": "INC-23-0014",
      "title": "GitHub Copilot Leaks API Keys and Secrets from Training Data",
      "slug": "github-copilot-training-data-leak",
      "url": "https://topaithreats.com/incidents/INC-23-0014-github-copilot-training-data-leak/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023-01",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "misconfigured-deployment",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "model-inversion-data-extraction"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "re-identification-attacks"
          }
        ]
      },
      "sources": [
        {
          "title": "Doe v. GitHub, Inc. — Class Action Complaint, U.S. District Court for the Northern District of California",
          "type": "primary",
          "date": "2022-11",
          "url": "https://githubcopilotlitigation.com/"
        },
        {
          "title": "Ziegler et al.: Measuring Data Contamination in Large Language Models: A Survey (arXiv)",
          "type": "analysis",
          "date": "2023-06",
          "url": "https://arxiv.org/abs/2306.05540"
        },
        {
          "title": "The Register: GitHub Copilot Caught Spitting Out API Keys and Proprietary Code",
          "type": "news",
          "date": "2023-01",
          "url": "https://www.theregister.com/2023/01/11/github_copilot_api_keys/"
        },
        {
          "title": "Ars Technica: GitHub Copilot Is a Lot Like Autocomplete — and It Has Similar Problems",
          "type": "news",
          "date": "2023-06",
          "url": "https://arstechnica.com/information-technology/2023/06/github-copilot-is-a-lot-like-autocomplete-and-it-has-similar-problems/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified",
        "arrests": "None",
        "recovery": "GitHub implemented content filters to reduce verbatim reproduction; litigation ongoing",
        "regulatory_action": "Class action litigation ongoing as of date of logging; no formal regulatory enforcement"
      }
    },
    {
      "id": "INC-23-0017",
      "title": "UnitedHealth nH Predict AI Claim Denial System",
      "slug": "unitedhealth-ai-claim-denial",
      "url": "https://topaithreats.com/incidents/INC-23-0017-unitedhealth-ai-claim-denial/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2023-01",
      "last_updated": "2026-03-10",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare",
        "finance"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-human-oversight",
        "competitive-pressure"
      ],
      "assets_involved": [
        "decision-automation",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "decision-loop-automation"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "UnitedHealth Lawsuit: AI Deny Claims Medicare Advantage",
          "type": "primary",
          "date": "2023-11",
          "url": "https://www.cbsnews.com/news/unitedhealth-lawsuit-ai-deny-claims-medicare-advantage-health-insurance-denials/"
        },
        {
          "title": "Class Action Lawsuit Against UnitedHealth's AI Claim Denials Advances",
          "type": "primary",
          "date": "2024-03",
          "url": "https://www.healthcarefinancenews.com/news/class-action-lawsuit-against-unitedhealths-ai-claim-denials-advances"
        },
        {
          "title": "Senate Report on Medicare Advantage AI Denials",
          "type": "primary",
          "date": "2024-10",
          "url": "https://www.healthcaredive.com/news/medicare-advantage-AI-denials-cvs-humana-unitedhealthcare-senate-report/730383/"
        },
        {
          "title": "Algorithms Deny Humans Health Care — The Regulatory Review",
          "type": "secondary",
          "date": "2025-03",
          "url": "https://www.theregreview.org/2025/03/18/phillips-algorithms-deny-humans-health-care/"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-23-0018",
      "title": "Kenyan Content Moderators vs Meta — 140+ Former Facebook Workers Diagnosed with PTSD",
      "slug": "kenyan-moderators-meta-ptsd",
      "url": "https://topaithreats.com/incidents/INC-23-0018-kenyan-moderators-meta-ptsd/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2023",
      "last_updated": "2026-03-29",
      "regions": [
        "africa"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "workers",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "indirectly-affected"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "competitive-pressure",
        "accountability-vacuum",
        "inadequate-human-oversight"
      ],
      "assets_involved": [
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "automation-induced-job-degradation"
        },
        "secondary": []
      },
      "sources": [
        {
          "title": "Kenyan content moderators vs Meta — 140+ PTSD diagnoses",
          "type": "news",
          "date": "2026",
          "url": "https://cnn.com"
        },
        {
          "title": "Facebook moderators in Kenya: working conditions and mental health",
          "type": "news",
          "date": "2026",
          "url": "https://hrmagazine.co.uk"
        },
        {
          "title": "Content moderation outsourcing and worker exploitation",
          "type": "analysis",
          "date": "2026",
          "url": "https://computerweekly.com"
        }
      ],
      "outcomes": {
        "regulatory_action": "Legal proceedings ongoing in Kenya"
      }
    },
    {
      "id": "INC-22-0003",
      "title": "PyTorch torchtriton Dependency Confusion Supply Chain Attack",
      "slug": "pytorch-torchtriton-supply-chain-attack",
      "url": "https://topaithreats.com/incidents/INC-22-0003-pytorch-torchtriton-supply-chain-attack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2022-12-25",
      "last_updated": "2026-03-28",
      "regions": [
        "global"
      ],
      "sectors": [
        "technology"
      ],
      "affected_groups": [
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "inadequate-access-controls",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "identity-credentials",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "security-cyber",
          "pattern": "ai-supply-chain-attack"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "PyTorch Blog: Compromised Nightly Dependency",
          "type": "primary",
          "date": "2022-12-31",
          "url": "https://pytorch.org/blog/compromised-nightly-dependency/"
        },
        {
          "title": "SentinelOne: PyTorch Dependency torchtriton Supply Chain Attack",
          "type": "analysis",
          "date": "2023-01",
          "url": "https://www.sentinelone.com/blog/pytorch-dependency-torchtriton-supply-chain-attack/"
        },
        {
          "title": "Checkmarx: PyTorch — A Leading ML Framework Was Poisoned with Malicious Dependency",
          "type": "analysis",
          "date": "2023-01",
          "url": "https://zero.checkmarx.com/py-torch-a-leading-ml-framework-was-poisoned-with-malicious-dependency-e30f88242964"
        },
        {
          "title": "ReversingLabs: PyTorch Supply Chain Attack — Dependency Confusion Burns DevOps",
          "type": "analysis",
          "date": "2023-01",
          "url": "https://www.reversinglabs.com/blog/pytorch-supply-chain-attack-dependency-confusion-burns-devops"
        },
        {
          "title": "Wiz: Malicious PyTorch Dependency torchtriton on PyPI — Everything You Need to Know",
          "type": "analysis",
          "date": "2023-01",
          "url": "https://www.wiz.io/blog/malicious-pytorch-dependency-torchtriton-on-pypi-everything-you-need-to-know"
        }
      ],
      "outcomes": {
        "other": "PyTorch removed torchtriton as a dependency, renamed it to pytorch-triton, and registered a dummy package on PyPI to prevent recurrence. Over 3,000 downloads of the malicious package recorded."
      }
    },
    {
      "id": "INC-22-0005",
      "title": "Air Canada Chatbot Hallucinated Refund Policy — Tribunal Ruling",
      "slug": "air-canada-chatbot-refund-ruling",
      "url": "https://topaithreats.com/incidents/INC-22-0005-air-canada-chatbot-refund-ruling/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "primary",
      "date_occurred": "2022-11",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation"
      ],
      "affected_groups": [
        "general-public",
        "business-organizations"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "hallucination-tendency",
        "misconfigured-deployment"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "cascading-hallucinations"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Civil Resolution Tribunal of British Columbia: Moffatt v. Air Canada, 2024 BCCRT 149",
          "type": "primary",
          "date": "2024-02",
          "url": "https://decisions.civilresolutionbc.ca/crt/crtd/en/item/521673/index.do"
        },
        {
          "title": "BBC News: Air Canada must honour refund policy invented by airline's chatbot",
          "type": "news",
          "date": "2024-02",
          "url": "https://www.bbc.com/travel/article/20240222-air-canada-chatbot-misinformation-what-travellers-should-know"
        },
        {
          "title": "The Guardian: Air Canada ordered to pay customer who was misled by airline's chatbot",
          "type": "news",
          "date": "2024-02",
          "url": "https://www.theguardian.com/world/2024/feb/16/air-canada-chatbot-lawsuit"
        },
        {
          "title": "Ars Technica: Air Canada must honor refund policy its chatbot made up",
          "type": "news",
          "date": "2024-02",
          "url": "https://arstechnica.com/tech-policy/2024/02/air-canada-must-honor-refund-policy-its-chatbot-made-up/"
        }
      ],
      "outcomes": {
        "financial_loss": "$812.02 CAD (damages and fees awarded to claimant)",
        "arrests": "None",
        "recovery": "Partial — tribunal awarded difference between full fare and bereavement fare",
        "regulatory_action": "Civil Resolution Tribunal ruling establishing corporate liability for chatbot statements"
      }
    },
    {
      "id": "INC-22-0004",
      "title": "RealPage AI Algorithmic Rent-Fixing",
      "slug": "realpage-algorithmic-rent-fixing",
      "url": "https://topaithreats.com/incidents/INC-22-0004-realpage-algorithmic-rent-fixing/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2022-10",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "social-services",
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "competitive-pressure",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation",
        "recommender-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "economic-labor",
          "pattern": "market-manipulation-via-ai"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "allocational-harm"
          }
        ]
      },
      "sources": [
        {
          "title": "ProPublica: Rent Going Up? One Company's Algorithm Could Be Why",
          "type": "primary",
          "date": "2022-10",
          "url": "https://www.propublica.org/article/yieldstar-rent-increase-realpage-rent"
        },
        {
          "title": "U.S. Department of Justice: Justice Department Sues RealPage for Algorithmic Pricing Scheme That Harms Millions of American Renters",
          "type": "primary",
          "date": "2024-11",
          "url": "https://www.justice.gov/opa/pr/justice-department-sues-realpage-algorithmic-pricing-scheme-harms-millions-american-renters"
        },
        {
          "title": "The Washington Post: DOJ sues rental software company RealPage over high rents",
          "type": "news",
          "date": "2024-11",
          "url": "https://www.washingtonpost.com/business/2024/11/21/realpage-doj-antitrust-lawsuit-rent-prices/"
        }
      ],
      "outcomes": {
        "financial_loss": "Estimated billions of dollars in inflated rent payments across millions of apartments",
        "arrests": "None",
        "recovery": "Litigation ongoing",
        "regulatory_action": "DOJ antitrust lawsuit filed November 2024; multiple class-action lawsuits pending"
      }
    },
    {
      "id": "INC-22-0002",
      "title": "Meta Housing Ad Discrimination DOJ Settlement",
      "slug": "meta-housing-ad-discrimination",
      "url": "https://topaithreats.com/incidents/INC-22-0002-meta-housing-ad-discrimination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2022-06",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "social-services",
        "corporate"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "regulators-public-servants"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "regulatory-gap"
      ],
      "assets_involved": [
        "recommender-systems",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "algorithmic-amplification"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "behavioral-profiling-without-consent"
          }
        ]
      },
      "sources": [
        {
          "title": "U.S. Department of Justice: Justice Department Secures Groundbreaking Settlement Agreement with Meta Platforms to Resolve Allegations of Discriminatory Advertising",
          "type": "primary",
          "date": "2022-06",
          "url": "https://www.justice.gov/opa/pr/justice-department-secures-groundbreaking-settlement-agreement-meta-platforms-formerly-known"
        },
        {
          "title": "Settlement Agreement: United States v. Meta Platforms, Inc.",
          "type": "primary",
          "date": "2022-06",
          "url": "https://www.justice.gov/d9/2022-06/meta_settlement_agreement.pdf"
        },
        {
          "title": "The Markup: Facebook Has Been Charged with Housing Discrimination by HUD",
          "type": "analysis",
          "date": "2019-03",
          "url": "https://themarkup.org/news/2019/03/28/facebook-has-been-charged-with-housing-discrimination-by-hud"
        },
        {
          "title": "The New York Times: Facebook Agrees to Overhaul Targeted Advertising System for Job, Housing and Loan Ads",
          "type": "news",
          "date": "2022-06",
          "url": "https://www.nytimes.com/2022/06/21/technology/facebook-ads-discrimination-settlement.html"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly quantified; settlement focused on injunctive relief rather than monetary damages",
        "arrests": "Not applicable",
        "recovery": "Meta required to develop new ad delivery system for housing ads; Variance Reduction System implemented",
        "regulatory_action": "DOJ settlement agreement requiring algorithmic changes; ongoing compliance monitoring"
      }
    },
    {
      "id": "INC-22-0001",
      "title": "Drug Discovery AI Repurposed to Generate Toxic Chemical Weapons Compounds",
      "slug": "drug-discovery-ai-toxic-compounds",
      "url": "https://topaithreats.com/incidents/INC-22-0001-drug-discovery-ai-toxic-compounds/",
      "failure_stage": "signal",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2022-03",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "healthcare",
        "government"
      ],
      "affected_groups": [
        "society-at-large"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "weaponization",
        "insufficient-safety-testing",
        "competitive-pressure"
      ],
      "assets_involved": [
        "foundation-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "ai-assisted-biological-threat-design"
        },
        "secondary": [
          {
            "domain": "security-cyber",
            "pattern": "automated-vulnerability-discovery"
          }
        ]
      },
      "sources": [
        {
          "title": "Urbina et al., 'Dual use of artificial-intelligence-powered drug discovery,' Nature Machine Intelligence, vol. 4, pp. 189–191 (2022)",
          "type": "primary",
          "date": "2022-03",
          "url": "https://www.nature.com/articles/s42256-022-00465-9"
        },
        {
          "title": "The Verge: AI suggested 40,000 new possible chemical weapons in just six hours",
          "type": "news",
          "date": "2022-03",
          "url": "https://www.theverge.com/2022/3/17/22983197/ai-new-possible-chemical-weapons-generative-models-vx"
        },
        {
          "title": "MIT Technology Review: How AI can be a force for good by helping in drug discovery",
          "type": "news",
          "date": "2022-03",
          "url": "https://www.technologyreview.com/2022/03/17/1047603/ai-drug-discovery-could-help-design-chemical-weapons/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None (the experiment was a controlled research exercise)",
        "recovery": "Not applicable",
        "regulatory_action": "No direct regulatory action; the paper contributed to ongoing dual-use AI policy discussions"
      }
    },
    {
      "id": "INC-21-0001",
      "title": "Chatbot Encouraged Man in Plot to Kill Queen Elizabeth II",
      "slug": "chatbot-encouraged-man-in-plot-to-kill-queen-elizabeth-ii",
      "url": "https://topaithreats.com/incidents/INC-21-0001-chatbot-encouraged-man-in-plot-to-kill-queen-elizabeth-ii/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2021-12-25",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "public-safety",
        "government"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "deceptive-manipulative-interfaces"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "goal-drift"
          },
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "AI chatbot told man to kill the Queen, court hears",
          "type": "news",
          "date": "2023-10",
          "url": "https://www.bbc.com/news/technology-67012224"
        },
        {
          "title": "AI chatbot encouraged man to kill the Queen, court hears",
          "type": "news",
          "date": "2023-10",
          "url": "https://www.theregister.com/2023/10/06/ai_chatbot_kill_queen/"
        },
        {
          "title": "British Man Admits Treason Over Crossbow Plot Against Queen",
          "type": "news",
          "date": "2023-02",
          "url": "https://www.voanews.com/a/british-man-admits-treason-over-crossbow-plot-against-queen/6946758.html"
        }
      ],
      "outcomes": {}
    },
    {
      "id": "INC-20-0004",
      "title": "Pulse Oximeter Racial Bias Propagates into AI Clinical Decision Systems",
      "slug": "pulse-oximeter-racial-bias-ai-propagation",
      "url": "https://topaithreats.com/incidents/INC-20-0004-pulse-oximeter-racial-bias-ai-propagation/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2020-12",
      "last_updated": "2026-03-13",
      "regions": [
        "north-america",
        "united-states",
        "global"
      ],
      "sectors": [
        "healthcare"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "training-data-bias",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "proxy-discrimination"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "NEJM: Racial Bias in Pulse Oximetry Measurement",
          "type": "primary",
          "date": "2020-12",
          "url": "https://www.nejm.org/doi/full/10.1056/NEJMc2029240"
        },
        {
          "title": "JAMA Internal Medicine: Racial and Ethnic Discrepancy in Pulse Oximetry and Delayed Identification of Treatment Eligibility Among Patients With COVID-19",
          "type": "primary",
          "date": "2022-07",
          "url": "https://jamanetwork.com/journals/jamainternalmedicine/fullarticle/2792653"
        },
        {
          "title": "FDA: Pulse Oximeters — Non-Clinical and Clinical Performance Testing Draft Guidance",
          "type": "policy",
          "date": "2025-01",
          "url": "https://www.fda.gov/regulatory-information/search-fda-guidance-documents/pulse-oximeters-medical-purposes-non-clinical-and-clinical-performance-testing-labeling-and"
        }
      ],
      "outcomes": {
        "regulatory_action": "FDA draft guidance (January 2025) requiring expanded diversity in pulse oximeter premarket clinical trials and prominent labeling warnings about skin pigmentation effects"
      }
    },
    {
      "id": "INC-20-0002",
      "title": "UK A-Level Algorithm Downgrades Disadvantaged Students",
      "slug": "uk-a-level-algorithm-grading",
      "url": "https://topaithreats.com/incidents/INC-20-0002-uk-a-level-algorithm-grading/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-08",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "education",
        "government"
      ],
      "affected_groups": [
        "general-public",
        "children"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "over-automation",
        "insufficient-safety-testing",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Ofqual: Awarding GCSE, AS, A level, advanced extension awards and extended project qualifications in summer 2020: interim report",
          "type": "primary",
          "date": "2020-08",
          "url": "https://www.gov.uk/government/publications/awarding-gcse-as-a-level-advanced-extension-awards-and-extended-project-qualifications-in-summer-2020-interim-report"
        },
        {
          "title": "BBC News: A-levels: Anger over 'unfair' results in England",
          "type": "news",
          "date": "2020-08",
          "url": "https://www.bbc.co.uk/news/education-53759832"
        },
        {
          "title": "The Guardian: A-level results: almost 40% of teacher assessments in England downgraded",
          "type": "news",
          "date": "2020-08",
          "url": "https://www.theguardian.com/education/2020/aug/13/almost-40-of-a-level-results-in-england-downgraded-from-teacher-predictions"
        },
        {
          "title": "House of Commons Education Committee: The impact of COVID-19 on education and children's services",
          "type": "primary",
          "date": "2021-03",
          "url": "https://committees.parliament.uk/work/202/the-impact-of-covid19-on-education-and-childrens-services/"
        }
      ],
      "outcomes": {
        "financial_loss": "Not directly quantifiable; significant disruption to university admissions and student plans",
        "arrests": "Not applicable",
        "recovery": "Government U-turn within four days; teacher-assessed grades reinstated",
        "regulatory_action": "Ofqual chair resigned; House of Commons Education Committee conducted parliamentary inquiry"
      }
    },
    {
      "id": "INC-20-0003",
      "title": "UN-Documented Autonomous Drone Attack in Libya",
      "slug": "libya-autonomous-drone-attack",
      "url": "https://topaithreats.com/incidents/INC-20-0003-libya-autonomous-drone-attack/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-03",
      "last_updated": "2026-02-15",
      "regions": [
        "africa"
      ],
      "sectors": [
        "government"
      ],
      "affected_groups": [
        "national-security-systems",
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "deployers-operators"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "weaponization",
        "regulatory-gap"
      ],
      "assets_involved": [
        "autonomous-agents",
        "industrial-control-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "lethal-autonomous-weapon-systems"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "United Nations Security Council: Final report of the Panel of Experts on Libya (S/2021/229)",
          "type": "primary",
          "date": "2021-03",
          "url": "https://documents-dds-ny.un.org/doc/UNDOC/GEN/N21/037/72/PDF/N2103772.pdf"
        },
        {
          "title": "New Scientist: A military drone with a mind of its own was used in conflict, says UN",
          "type": "news",
          "date": "2021-05",
          "url": "https://www.newscientist.com/article/2278852-a-military-drone-with-a-mind-of-its-own-was-used-in-conflict-says-un/"
        },
        {
          "title": "The Guardian: Autonomous drone 'hunted down' soldiers in Libya without human control",
          "type": "news",
          "date": "2021-05",
          "url": "https://www.theguardian.com/world/2021/may/30/autonomous-drone-hunted-down-soldiers-in-libya-without-human-control"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "Renewed international debate on autonomous weapons regulation; no binding treaty adopted"
      }
    },
    {
      "id": "INC-20-0001",
      "title": "Clearview AI Mass Facial Recognition Scraping",
      "slug": "clearview-ai-mass-surveillance",
      "url": "https://topaithreats.com/incidents/INC-20-0001-clearview-ai-mass-surveillance/",
      "failure_stage": "systemic_risk",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-01",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america",
        "europe"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "government-institutions"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "regulatory-gap",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "biometric-data",
        "training-datasets",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "privacy-surveillance",
          "pattern": "mass-surveillance-amplification"
        },
        "secondary": [
          {
            "domain": "privacy-surveillance",
            "pattern": "biometric-exploitation"
          }
        ]
      },
      "sources": [
        {
          "title": "The New York Times: The Secretive Company That Might End Privacy as We Know It",
          "type": "news",
          "date": "2020-01",
          "url": "https://www.nytimes.com/2020/01/18/technology/clearview-privacy-facial-recognition.html"
        },
        {
          "title": "EDPB: Facial recognition - several national data protection authorities investigate Clearview AI",
          "type": "primary",
          "date": "2022-03",
          "url": "https://edpb.europa.eu/news/news/2022/facial-recognition-several-national-data-protection-authorities-investigate-clearview_en"
        }
      ],
      "outcomes": {
        "financial_loss": "Not applicable",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "Multiple GDPR fines totaling over EUR 50 million; banned in several jurisdictions"
      }
    },
    {
      "id": "INC-20-0005",
      "title": "Robert Williams Wrongful Arrest from Facial Recognition Racial Bias",
      "slug": "robert-williams-facial-recognition-wrongful-arrest",
      "url": "https://topaithreats.com/incidents/INC-20-0005-robert-williams-facial-recognition-wrongful-arrest/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2020-01",
      "last_updated": "2026-03-28",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "law-enforcement"
      ],
      "affected_groups": [
        "general-public",
        "vulnerable-communities"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants",
        "indirectly-affected"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation"
      ],
      "assets_involved": [
        "biometric-data",
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "data-imbalance-bias"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          },
          {
            "domain": "discrimination-social-harm",
            "pattern": "proxy-discrimination"
          }
        ]
      },
      "sources": [
        {
          "title": "Williams v. City of Detroit — ACLU Case Page",
          "type": "primary",
          "date": "2024-06-28",
          "url": "https://www.aclu.org/cases/williams-v-city-of-detroit-face-recognition-false-arrest"
        },
        {
          "title": "Wrongful Facial Recognition Arrest Leads to Landmark Settlement — Michigan Public",
          "type": "news",
          "date": "2024-06-28",
          "url": "https://www.michiganpublic.org/criminal-justice-legal-system/2024-06-28/it-didnt-make-sense-at-all-wrongful-facial-recognition-arrest-leads-to-landmark-settlement"
        },
        {
          "title": "ACLU Press Release: Nation's Strongest Police Department Policy on Facial Recognition Technology",
          "type": "primary",
          "date": "2024-06-28",
          "url": "https://www.aclu.org/press-releases/civil-rights-advocates-achieve-the-nations-strongest-police-department-policy-on-facial-recognition-technology"
        },
        {
          "title": "NPR: How Facial Recognition Led To False Arrest Of Black Man",
          "type": "news",
          "date": "2020-06-24",
          "url": "https://www.npr.org/2020/06/24/882683463/the-computer-got-it-wrong-how-facial-recognition-led-to-a-false-arrest-in-michig"
        }
      ],
      "outcomes": {
        "financial_loss": "$300,000 settlement plus attorneys' fees",
        "regulatory_action": "Detroit Police Department prohibited from arresting based solely on facial recognition results; photo lineups cannot follow directly from facial recognition without independent evidence; mandatory officer training on facial recognition risks and racial bias; audit of all DPD cases since 2017 where facial recognition was used for arrest warrants",
        "legal_outcome": "Settlement with City of Detroit including the nation's strongest police department facial recognition policy"
      }
    },
    {
      "id": "INC-20-0006",
      "title": "'Vegetative Electron Microscopy' Nonsense Phrase Contaminates Scientific Literature via AI",
      "slug": "vegetative-electron-microscopy-ai-contamination",
      "url": "https://topaithreats.com/incidents/INC-20-0006-vegetative-electron-microscopy-ai-contamination/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "medium",
      "evidence_level": "corroborated",
      "date_occurred": "2020-01",
      "last_updated": "2026-03-13",
      "regions": [
        "global"
      ],
      "sectors": [
        "education",
        "healthcare"
      ],
      "affected_groups": [
        "society-at-large",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "hallucination-tendency",
        "training-data-bias"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        }
      },
      "sources": [
        {
          "title": "Retraction Watch: As a nonsense phrase of shady provenance makes the rounds, Elsevier defends its use",
          "type": "news",
          "date": "2025-02",
          "url": "https://retractionwatch.com/2025/02/10/vegetative-electron-microscopy-fingerprint-paper-mill/"
        },
        {
          "title": "The Conversation: A weird phrase is plaguing scientific papers — and we traced it back to a glitch in AI training data",
          "type": "news",
          "date": "2025-03",
          "url": "https://theconversation.com/a-weird-phrase-is-plaguing-scientific-papers-and-we-traced-it-back-to-a-glitch-in-ai-training-data-254463"
        },
        {
          "title": "Gizmodo: A Scanning Error Created a Fake Science Term — Now AI Won't Let It Die",
          "type": "news",
          "date": "2025-02",
          "url": "https://gizmodo.com/a-scanning-error-created-a-fake-science-term-now-ai-wont-let-it-die-2000590659"
        }
      ],
      "outcomes": {
        "regulatory_action": "Contested retractions and corrections at Springer Nature and Elsevier journals"
      }
    },
    {
      "id": "INC-19-0001",
      "title": "AI Voice Clone CEO Fraud Against UK Energy Company",
      "slug": "deepfake-ceo-voice-uk-energy",
      "url": "https://topaithreats.com/incidents/INC-19-0001-deepfake-ceo-voice-uk-energy/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "corroborated",
      "date_occurred": "2019-03",
      "last_updated": "2025-01-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "energy",
        "corporate"
      ],
      "affected_groups": [
        "business-organizations"
      ],
      "exposure_pathways": [
        "adversarial-targeting"
      ],
      "ecosystem_positions": [
        "organizational-leaders"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "intentional-fraud",
        "social-engineering"
      ],
      "assets_involved": [
        "voice-synthesis",
        "identity-credentials"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "deepfake-identity-hijacking"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "ai-enabled-fraud"
          },
          {
            "domain": "security-cyber",
            "pattern": "social-engineering-via-ai"
          },
          {
            "domain": "security-cyber",
            "pattern": "adversarial-evasion"
          }
        ]
      },
      "sources": [
        {
          "title": "The Wall Street Journal: Fraudsters Used AI to Mimic CEO's Voice in Unusual Cybercrime Case",
          "type": "news",
          "date": "2019-08",
          "url": "https://www.wsj.com/articles/fraudsters-use-ai-to-mimic-ceos-voice-in-unusual-cybercrime-case-11567157402"
        },
        {
          "title": "Euler Hermes Confirmation of Incident",
          "type": "primary",
          "date": "2019-09",
          "url": ""
        }
      ],
      "outcomes": {
        "financial_loss": "$243,000 USD (EUR 220,000)",
        "arrests": "None reported; suspects not publicly identified",
        "recovery": "Partial recovery through insurance claim via Euler Hermes",
        "regulatory_action": "None specific to this incident"
      }
    },
    {
      "id": "INC-18-0002",
      "title": "Amazon AI Recruiting Tool Gender Bias",
      "slug": "amazon-ai-hiring-bias",
      "url": "https://topaithreats.com/incidents/INC-18-0002-amazon-ai-hiring-bias/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2018-10",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "employment"
      ],
      "affected_groups": [
        "workers"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "decision-automation",
        "training-datasets"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "data-imbalance-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Reuters: Amazon scraps secret AI recruiting tool that showed bias against women",
          "type": "primary",
          "date": "2018-10",
          "url": "https://www.reuters.com/article/us-amazon-com-jobs-automation-insight-idUSKCN1MK08G"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly disclosed",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "Tool scrapped after internal discovery of bias"
      }
    },
    {
      "id": "INC-18-0003",
      "title": "Boeing 737 MAX MCAS Automation Failures — Two Fatal Crashes",
      "slug": "boeing-737-max-mcas-failures",
      "url": "https://topaithreats.com/incidents/INC-18-0003-boeing-737-max-mcas-failures/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2018-10",
      "last_updated": "2026-02-15",
      "regions": [
        "asia",
        "africa"
      ],
      "sectors": [
        "transportation"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "competitive-pressure",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "industrial-control-systems",
        "autonomous-agents"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "overreliance-automation-bias"
        },
        "secondary": [
          {
            "domain": "systemic-catastrophic",
            "pattern": "accumulative-risk-trust-erosion"
          }
        ]
      },
      "sources": [
        {
          "title": "NTSB: Assumptions Used in the Safety Assessment Process and the Effects of Multiple Alerts and Indications on Pilot Performance (ASR-19/01)",
          "type": "primary",
          "date": "2019-09",
          "url": "https://www.ntsb.gov/investigations/accidentreports/reports/asr1901.pdf"
        },
        {
          "title": "U.S. Department of Justice: Boeing Charged with 737 Max Fraud Conspiracy and Agrees to Pay over $2.5 Billion",
          "type": "primary",
          "date": "2021-01",
          "url": "https://www.justice.gov/opa/pr/boeing-charged-737-max-fraud-conspiracy-and-agrees-pay-over-25-billion"
        },
        {
          "title": "The New York Times: Boeing 737 Max Crisis",
          "type": "news",
          "date": "2019-03",
          "url": "https://www.nytimes.com/news-event/boeing-737-max"
        },
        {
          "title": "The Seattle Times: Flawed analysis, failed oversight: How Boeing, FAA certified the suspect 737 MAX flight control system",
          "type": "news",
          "date": "2019-03",
          "url": "https://www.seattletimes.com/business/boeing-aerospace/failed-certification-faa-missed-safety-issues-in-the-737-max-system-implicated-in-the-lion-air-crash/"
        }
      ],
      "outcomes": {
        "financial_loss": "$2.5 billion DOJ settlement; estimated total cost to Boeing exceeding $20 billion including grounding, redesign, litigation, and compensation",
        "arrests": "None; DOJ deferred prosecution agreement with Boeing",
        "recovery": "737 MAX returned to service in late 2020 after MCAS redesign and new pilot training requirements",
        "regulatory_action": "Worldwide fleet grounding (March 2019–November 2020); FAA certification process reformed; DOJ $2.5 billion settlement"
      }
    },
    {
      "id": "INC-18-0001",
      "title": "Uber Autonomous Vehicle Pedestrian Fatality",
      "slug": "uber-self-driving-fatality",
      "url": "https://topaithreats.com/incidents/INC-18-0001-uber-self-driving-fatality/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2018-03",
      "last_updated": "2025-01-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "transportation",
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "insufficient-safety-testing",
        "over-automation",
        "accountability-vacuum"
      ],
      "assets_involved": [
        "autonomous-agents",
        "industrial-control-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "human-ai-control",
          "pattern": "unsafe-human-in-the-loop-failures"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "tool-misuse-privilege-escalation"
          }
        ]
      },
      "sources": [
        {
          "title": "NTSB: Collision Between Vehicle Controlled by Developmental Automated Driving System and Pedestrian (HAR-19/03)",
          "type": "primary",
          "date": "2019-11",
          "url": "https://www.ntsb.gov/investigations/accidentreports/reports/har1903.pdf"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly disclosed",
        "arrests": "Safety operator Rafaela Vasquez charged with negligent homicide",
        "recovery": "Uber reached undisclosed settlement with victim's family",
        "regulatory_action": "NTSB issued safety recommendations; Uber suspended testing program"
      }
    },
    {
      "id": "INC-17-0001",
      "title": "Facebook AI Mistranslation of Arabic Post Leads to Wrongful Arrest in Israel",
      "slug": "facebook-ai-mistranslation-arrest",
      "url": "https://topaithreats.com/incidents/INC-17-0001-facebook-ai-mistranslation-arrest/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2017-10",
      "last_updated": "2026-02-15",
      "regions": [
        "middle-east"
      ],
      "sectors": [
        "corporate",
        "public-safety"
      ],
      "affected_groups": [
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "individual",
      "causal_factors": [
        "hallucination-tendency",
        "insufficient-safety-testing"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "information-integrity",
          "pattern": "misinformation-hallucinated-content"
        },
        "secondary": [
          {
            "domain": "discrimination-social-harm",
            "pattern": "proxy-discrimination"
          }
        ]
      },
      "sources": [
        {
          "title": "The Guardian: Facebook translates 'good morning' into 'attack them', leading to arrest",
          "type": "news",
          "date": "2017-10",
          "url": "https://www.theguardian.com/technology/2017/oct/24/facebook-palestine-israel-translates-good-morning-attack-them-arrest"
        },
        {
          "title": "BBC News: Palestinian arrested over mistranslated 'good morning' Facebook post",
          "type": "news",
          "date": "2017-10",
          "url": "https://www.bbc.com/news/technology-41764369"
        },
        {
          "title": "Haaretz: Israeli Police Arrest Palestinian Because Facebook Utilised 'Attack Them' Instead of 'Good Morning'",
          "type": "news",
          "date": "2017-10",
          "url": "https://www.haaretz.com/israel-news/2017-10-22/ty-article/palestinian-arrested-over-mistranslated-good-morning-facebook-post/0000017f-db97-df62-a9ff-dfd77e370000"
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantified",
        "arrests": "One Palestinian man wrongfully detained for several hours",
        "recovery": "Man released after the translation error was identified",
        "regulatory_action": "No formal regulatory action; incident widely cited in discussions of AI translation bias"
      }
    },
    {
      "id": "INC-16-0001",
      "title": "Australia Robodebt Automated Welfare Fraud Detection",
      "slug": "robodebt-australia",
      "url": "https://topaithreats.com/incidents/INC-16-0001-robodebt-australia/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2016-07",
      "last_updated": "2025-01-15",
      "regions": [
        "oceania"
      ],
      "sectors": [
        "government",
        "social-services"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "over-automation",
        "model-opacity",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "allocational-harm"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Royal Commission into the Robodebt Scheme: Final Report",
          "type": "primary",
          "date": "2023-07",
          "url": "https://robodebt.royalcommission.gov.au/publications/report"
        }
      ],
      "outcomes": {
        "financial_loss": "AUD $1.76 billion in wrongful debt notices",
        "arrests": "None; referrals for civil action against former officials",
        "recovery": "AUD $721 million refunded to affected individuals",
        "regulatory_action": "Royal Commission, scheme declared illegal"
      }
    },
    {
      "id": "INC-16-0003",
      "title": "COMPAS Recidivism Algorithm Racial Bias",
      "slug": "compas-recidivism-algorithm-bias",
      "url": "https://topaithreats.com/incidents/INC-16-0003-compas-recidivism-algorithm-bias/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2016-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "government",
        "law-enforcement"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "general-public"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "institution",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "accountability-vacuum",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "proxy-discrimination"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "implicit-authority-transfer"
          }
        ]
      },
      "sources": [
        {
          "title": "ProPublica: Machine Bias — There's software used across the country to predict future criminals. And it's biased against blacks.",
          "type": "primary",
          "date": "2016-05",
          "url": "https://www.propublica.org/article/machine-bias-risk-assessments-in-criminal-sentencing"
        },
        {
          "title": "Northpointe Inc.: Response to ProPublica — Demonstrating accuracy equity and predictive parity",
          "type": "primary",
          "date": "2016-07",
          "url": "https://www.equivant.com/response-to-propublica-demonstrating-accuracy-equity-and-predictive-parity/"
        },
        {
          "title": "State v. Loomis, 881 N.W.2d 749 (Wis. 2016)",
          "type": "primary",
          "date": "2016-07",
          "url": "https://scholar.google.com/scholar_case?case=12268852475956079706"
        }
      ],
      "outcomes": {
        "financial_loss": "Not quantifiable; impact measured in unjust pretrial and sentencing outcomes",
        "arrests": "Not applicable",
        "recovery": "COMPAS remains in use in many jurisdictions; some courts have adopted disclosure requirements",
        "regulatory_action": "Wisconsin Supreme Court imposed disclosure requirements; no federal regulation enacted"
      }
    },
    {
      "id": "INC-16-0002",
      "title": "Microsoft Tay Twitter Chatbot Adversarial Manipulation",
      "slug": "microsoft-tay-twitter-bot",
      "url": "https://topaithreats.com/incidents/INC-16-0002-microsoft-tay-twitter-bot/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "high",
      "evidence_level": "primary",
      "date_occurred": "2016-03",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "corporate"
      ],
      "affected_groups": [
        "general-public",
        "developers-ai-builders"
      ],
      "exposure_pathways": [
        "direct-interaction"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "direct-users"
      ],
      "impact_level": "organization",
      "causal_factors": [
        "insufficient-safety-testing",
        "adversarial-attack",
        "inadequate-access-controls"
      ],
      "assets_involved": [
        "large-language-models",
        "content-platforms"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "agentic-autonomous",
          "pattern": "goal-drift"
        },
        "secondary": [
          {
            "domain": "information-integrity",
            "pattern": "synthetic-media-manipulation"
          }
        ]
      },
      "sources": [
        {
          "title": "Microsoft Official Blog: Learning from Tay's introduction",
          "type": "primary",
          "date": "2016-03",
          "url": "https://blogs.microsoft.com/blog/2016/03/25/learning-tays-introduction/"
        },
        {
          "title": "The Verge: Microsoft is deleting its AI chatbot's incredibly racist tweets",
          "type": "news",
          "date": "2016-03",
          "url": "https://www.theverge.com/2016/3/24/11297050/tay-microsoft-chatbot-racist"
        },
        {
          "title": "BBC News: Microsoft chatbot is taught to swear",
          "type": "news",
          "date": "2016-03",
          "url": "https://www.bbc.com/news/technology-35890188"
        }
      ],
      "outcomes": {
        "financial_loss": "Not publicly disclosed",
        "arrests": "None",
        "recovery": "Not applicable",
        "regulatory_action": "None; incident preceded major AI regulatory frameworks"
      }
    },
    {
      "id": "INC-13-0001",
      "title": "Dutch Childcare Benefits Algorithm Discrimination",
      "slug": "dutch-childcare-benefits-scandal",
      "url": "https://topaithreats.com/incidents/INC-13-0001-dutch-childcare-benefits-scandal/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2013-01",
      "last_updated": "2026-02-15",
      "regions": [
        "europe"
      ],
      "sectors": [
        "government",
        "social-services"
      ],
      "affected_groups": [
        "vulnerable-communities",
        "children"
      ],
      "exposure_pathways": [
        "algorithmic-decision-impact"
      ],
      "ecosystem_positions": [
        "deployers-operators",
        "regulators-public-servants"
      ],
      "impact_level": "society-wide",
      "causal_factors": [
        "training-data-bias",
        "model-opacity",
        "over-automation",
        "regulatory-gap"
      ],
      "assets_involved": [
        "decision-automation"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "discrimination-social-harm",
          "pattern": "proxy-discrimination"
        },
        "secondary": [
          {
            "domain": "human-ai-control",
            "pattern": "overreliance-automation-bias"
          }
        ]
      },
      "sources": [
        {
          "title": "Council of Europe Venice Commission: Report on the Dutch Childcare Benefits Affair",
          "type": "primary",
          "date": "2021-10",
          "url": "https://www.venice.coe.int/webforms/documents/default.aspx?pdffile=CDL-REF(2021)073-e"
        },
        {
          "title": "Dutch Administrative Jurisdiction Division Court Ruling",
          "type": "primary",
          "date": "2019-10",
          "url": ""
        }
      ],
      "outcomes": {
        "financial_loss": "Over EUR 5.4 billion allocated for compensation",
        "arrests": "None; parliamentary and criminal investigations ongoing",
        "recovery": "Compensation scheme established for over 26,000 affected families",
        "regulatory_action": "Dutch government resigned; families compensated; algorithm banned"
      }
    },
    {
      "id": "INC-10-0001",
      "title": "2010 Flash Crash — Algorithmic Trading Cascading Failure",
      "slug": "flash-crash-algorithmic-trading",
      "url": "https://topaithreats.com/incidents/INC-10-0001-flash-crash-algorithmic-trading/",
      "failure_stage": "harm",
      "status": "confirmed",
      "severity": "critical",
      "evidence_level": "primary",
      "date_occurred": "2010-05",
      "last_updated": "2026-02-15",
      "regions": [
        "north-america"
      ],
      "sectors": [
        "finance"
      ],
      "affected_groups": [
        "business-organizations",
        "general-public"
      ],
      "exposure_pathways": [
        "infrastructure-dependency"
      ],
      "ecosystem_positions": [
        "developers-providers",
        "deployers-operators"
      ],
      "impact_level": "sector",
      "causal_factors": [
        "over-automation",
        "competitive-pressure"
      ],
      "assets_involved": [
        "financial-systems"
      ],
      "threat_patterns": {
        "primary": {
          "domain": "systemic-catastrophic",
          "pattern": "infrastructure-dependency-collapse"
        },
        "secondary": [
          {
            "domain": "agentic-autonomous",
            "pattern": "multi-agent-coordination-failures"
          }
        ]
      },
      "sources": [
        {
          "title": "SEC/CFTC: Findings Regarding the Market Events of May 6, 2010",
          "type": "primary",
          "date": "2010-09",
          "url": "https://www.sec.gov/news/studies/2010/marketevents-report.pdf"
        },
        {
          "title": "Reuters: U.S. probes warming flash crash role",
          "type": "news",
          "date": "2010-10",
          "url": "https://www.reuters.com/article/us-flashcrash-waddell-idUSTRE69462020101005"
        },
        {
          "title": "Wall Street Journal: SEC Report on Flash Crash",
          "type": "news",
          "date": "2010-10",
          "url": "https://www.wsj.com/articles/SB10001424052748704029304575526390131916792"
        }
      ],
      "outcomes": {
        "financial_loss": "Nearly $1 trillion in market value temporarily erased; permanent losses difficult to quantify",
        "arrests": "Navinder Singh Sarao arrested in 2015 for market manipulation (spoofing) contributing to the crash",
        "recovery": "Markets largely recovered within 20 minutes; some trades later cancelled",
        "regulatory_action": "SEC/CFTC implemented circuit breakers and single-stock limit up/limit down rules; new regulations on high-frequency trading"
      }
    }
  ]
}