{
  "version": "1.0.0",
  "generatedAt": "2026-05-07T12:54:00.473Z",
  "findingType": "lie",
  "findingId": 2,
  "contentHash": "12eba2dc95d3e7034bf700fb658b5e15695a2a5a4588b44c1fe9646b7658b460",
  "title": "OpenAI: usage policy bars 'military and warfare' applications",
  "summary": "OpenAI promised: \"Disallowed usage of our models: … Activity that has high risk of physical harm, including: Weapons development; Military and warfare.\" (23 March 2023). 2 documented violations. Severity: catastrophic.",
  "methodology": "Documented promises are sourced from public statements, filings, and transcripts. Each promise must have a non-empty source URL. Violations are independently sourced with evidence strength tiers. The time delta (days after promise) quantifies the gap between commitment and breach. Materiality assessment includes financial cost, people affected, and environmental impact where applicable.",
  "sources": [
    {
      "url": "https://web.archive.org/web/20240108162812/https://openai.com/policies/usage-policies",
      "label": "OpenAI Usage Policies, archived 8 January 2024",
      "type": "primary"
    },
    {
      "url": "https://www.theintercept.com/2024/01/12/open-ai-military-ban-chatgpt/",
      "label": "The Intercept, 12 January 2024",
      "type": "primary"
    },
    {
      "url": "https://www.reuters.com/technology/artificial-intelligence/openai-partners-with-anduril-give-ai-expertise-anti-drone-systems-2024-12-04/",
      "label": "Reuters, 4 December 2024",
      "type": "primary"
    }
  ],
  "queries": [
    {
      "description": "Promise record with entity",
      "table": "documented_promises",
      "parameters": {
        "id": 2
      }
    },
    {
      "description": "Promise violations",
      "table": "promise_violations",
      "parameters": {
        "promiseId": 2
      }
    }
  ],
  "dataSnapshot": {
    "promiseId": "2",
    "slug": "openai-no-military-use-2023",
    "entityId": "9144",
    "entityName": "OpenAI",
    "headline": "OpenAI: usage policy bars 'military and warfare' applications",
    "promiseText": "Disallowed usage of our models: … Activity that has high risk of physical harm, including: Weapons development; Military and warfare.",
    "promiseDate": "2023-03-23T00:00:00.000Z",
    "promiseSourceUrl": "https://web.archive.org/web/20240108162812/https://openai.com/policies/usage-policies",
    "audience": "general_public",
    "domain": "military",
    "severity": "catastrophic",
    "status": "violated",
    "narrative": "For nine months OpenAI's published usage policy explicitly banned military and warfare applications. In January 2024 the prohibition was quietly removed and within days the company confirmed an active working relationship with the United States Department of Defense.",
    "financialCostUsd": null,
    "peopleAffectedEstimate": null,
    "violations": [
      {
        "id": "4",
        "violationText": "OpenAI removes the explicit 'military and warfare' prohibition from its usage policy.",
        "violationDate": "2024-01-12T00:00:00.000Z",
        "violationSourceUrl": "https://www.theintercept.com/2024/01/12/open-ai-military-ban-chatgpt/",
        "evidenceStrength": "primary",
        "daysAfterPromise": "295"
      },
      {
        "id": "5",
        "violationText": "OpenAI confirms cybersecurity work with the US Department of Defense; later announces partnership with weapons-maker Anduril.",
        "violationDate": "2024-12-04T00:00:00.000Z",
        "violationSourceUrl": "https://www.reuters.com/technology/artificial-intelligence/openai-partners-with-anduril-give-ai-expertise-anti-drone-systems-2024-12-04/",
        "evidenceStrength": "primary",
        "daysAfterPromise": "622"
      }
    ],
    "createdAt": "2026-05-03T09:27:08.967Z"
  },
  "entities": [
    {
      "id": "9144",
      "name": "OpenAI",
      "slug": "openai",
      "type": "company"
    }
  ],
  "verificationSteps": [
    "Retrieve documented promise #2 from documented_promises.",
    "Visit promise source: https://web.archive.org/web/20240108162812/https://openai.com/policies/usage-policies — confirm the original commitment was made.",
    "Violation #1 (12 January 2024): visit https://www.theintercept.com/2024/01/12/open-ai-military-ban-chatgpt/ — confirm \"OpenAI removes the explicit 'military and warfare' prohibition from its usage policy.\"",
    "Violation #2 (4 December 2024): visit https://www.reuters.com/technology/artificial-intelligence/openai-partners-with-anduril-give-ai-expertise-anti-drone-systems-2024-12-04/ — confirm \"OpenAI confirms cybersecurity work with the US Department of Defense; later announces partnership with weapons-maker Anduril.\"",
    "Verify the SHA-256 content hash by re-generating the JSON bundle."
  ]
}