{ "type": "bundle", "id": "bundle--11cb50a2-bded-430d-976a-c506226b165b", "objects": [ { "type": "attack-pattern", "spec_version": "2.1", "id": "attack-pattern--7b6c328e-b050-4d76-8e11-ff3b3fe7dea3", "created_by_ref": "identity--f1a0f560-2d9e-4c5d-bf47-7e96e805de82", "created": "2024-11-22T16:43:58.10809Z", "modified": "2024-11-22T16:43:58.10809Z", "name": "Develop AI-Generated Images (Deepfakes)", "description": "Deepfakes refer to AI-generated falsified photos, videos, or soundbites. An influence operation may use deepfakes to depict an inauthentic situation by synthetically recreating an individual\u2019s face, body, voice, and physical gestures.\n\nAssociated Techniques and Sub-techniques:\nT0145.002: AI-Generated Account Imagery: Analysts should use this sub-technique to document use of AI generated imagery in accounts\u2019 profile pictures or other account imagery.", "kill_chain_phases": [ { "kill_chain_name": "mitre-attack", "phase_name": "develop-content" } ], "external_references": [ { "source_name": "mitre-attack", "url": "https://github.com/DISARMFoundation/DISARMframeworks/blob/main/generated_pages/techniques/T0086.002.md", "external_id": "T0086.002" } ], "object_marking_refs": [ "marking-definition--f79f25d2-8b96-4580-b169-eb7b613a7c31" ], "x_mitre_is_subtechnique": true, "x_mitre_platforms": [ "Windows", "Linux", "Mac" ], "x_mitre_version": "2.1" } ] }