diff --git a/DISARM_MASTER_DATA/DISARM_DATA_MASTER.xlsx b/DISARM_MASTER_DATA/DISARM_DATA_MASTER.xlsx
index 6582d48..c826385 100644
Binary files a/DISARM_MASTER_DATA/DISARM_DATA_MASTER.xlsx and b/DISARM_MASTER_DATA/DISARM_DATA_MASTER.xlsx differ
diff --git a/DISARM_MASTER_DATA/DISARM_FRAMEWORKS_MASTER.xlsx b/DISARM_MASTER_DATA/DISARM_FRAMEWORKS_MASTER.xlsx
index c559e4e..198c234 100644
Binary files a/DISARM_MASTER_DATA/DISARM_FRAMEWORKS_MASTER.xlsx and b/DISARM_MASTER_DATA/DISARM_FRAMEWORKS_MASTER.xlsx differ
diff --git a/generated_files/disarm_red_framework_clickable.html b/generated_files/disarm_red_framework_clickable.html
index f12c2e8..8687014 100644
--- a/generated_files/disarm_red_framework_clickable.html
+++ b/generated_files/disarm_red_framework_clickable.html
@@ -568,7 +568,7 @@ function handleTechniqueClick(box) {
 T0137.006 Manipulate Stocks
 T0088.001 Develop AI-Generated Audio (Deepfakes)
-T0152.002 Blog
+T0152.002 Blog Asset
@@ -604,7 +604,7 @@ function handleTechniqueClick(box) {
 T0138.001 Encourage
 T0089 Obtain Private Documents
-T0152.004 Website
+T0152.004 Website Asset
@@ -657,7 +657,7 @@ function handleTechniqueClick(box) {
 T0139 Dissuade from Acting
-T0146 Account
+T0146 Account Asset
 T0152.007 Audio Platform
@@ -675,7 +675,7 @@ function handleTechniqueClick(box) {
 T0139.001 Discourage
-T0146.001 Free Account
+T0146.001 Free Account Asset
 T0152.008 Live Streaming Platform
@@ -693,7 +693,7 @@ function handleTechniqueClick(box) {
 T0139.002 Silence
-T0146.002 Paid Account
+T0146.002 Paid Account Asset
 T0152.009 Software Delivery Platform
@@ -711,7 +711,7 @@ function handleTechniqueClick(box) {
 T0139.003 Deter
-T0146.003 Verified Account
+T0146.003 Verified Account Asset
 T0152.010 File Hosting Platform
@@ -729,7 +729,7 @@ function handleTechniqueClick(box) {
 T0140 Cause Harm
-T0146.004 Administrator Account
+T0146.004 Administrator Account Asset
 T0152.011 Wiki Platform
@@ -783,7 +783,7 @@ function handleTechniqueClick(box) {
 T0140.003 Spread Hate
-T0146.007 Automated Account
+T0146.007 Automated Account Asset
 T0153.001 Email Platform
@@ -801,7 +801,7 @@ function handleTechniqueClick(box) {
-T0147 Software
+T0147 Software Asset
 T0153.002 Link Shortening Platform
@@ -819,8 +819,8 @@ function handleTechniqueClick(box) {
-T0147.001 Game
-T0153.003 Shortened Link
+T0147.001 Game Asset
+T0153.003 Shortened Link Asset
@@ -837,8 +837,8 @@ function handleTechniqueClick(box) {
-T0147.002 Game Mod
-T0153.004 QR Code
+T0147.002 Game Mod Asset
+T0153.004 QR Code Asset
@@ -855,7 +855,7 @@ function handleTechniqueClick(box) {
-T0147.003 Malware
+T0147.003 Malware Asset
 T0153.005 Online Advertising Platform
@@ -873,7 +873,7 @@ function handleTechniqueClick(box) {
-T0147.004 Mobile App
+T0147.004 Mobile App Asset
 T0153.006 Content Recommendation Algorithm
@@ -927,7 +927,7 @@ function handleTechniqueClick(box) {
-T0148.002 Bank Account
+T0148.002 Bank Account Asset
 T0154.001 AI LLM Platform
@@ -982,7 +982,7 @@ function handleTechniqueClick(box) {
 T0148.005 Subscription Processing Capability
-T0155.001 Password Gated
+T0155.001 Password Gated Asset
@@ -1000,7 +1000,7 @@ function handleTechniqueClick(box) {
 T0148.006 Crowdfunding Platform
-T0155.002 Invite Gated
+T0155.002 Invite Gated Asset
@@ -1018,7 +1018,7 @@ function handleTechniqueClick(box) {
 T0148.007 eCommerce Platform
-T0155.003 Approval Gated
+T0155.003 Approval Gated Asset
@@ -1036,7 +1036,7 @@ function handleTechniqueClick(box) {
 T0148.008 Cryptocurrency Exchange Platform
-T0155.004 Geoblocked
+T0155.004 Geoblocked Asset
@@ -1054,7 +1054,7 @@ function handleTechniqueClick(box) {
 T0148.009 Cryptocurrency Wallet
-T0155.005 Paid Access
+T0155.005 Paid Access Asset
@@ -1072,7 +1072,7 @@ function handleTechniqueClick(box) {
 T0149 Online Infrastructure
-T0155.006 Subscription Access
+T0155.006 Subscription Access Asset
@@ -1089,7 +1089,7 @@ function handleTechniqueClick(box) {
-T0149.001 Domain
+T0149.001 Domain Asset
 T0155.007 Encrypted Communication Channel
@@ -1107,7 +1107,7 @@ function handleTechniqueClick(box) {
-T0149.002 Email Domain
+T0149.002 Email Domain Asset
@@ -1143,7 +1143,7 @@ function handleTechniqueClick(box) {
-T0149.004 Redirecting Domain
+T0149.004 Redirecting Domain Asset
@@ -1161,7 +1161,7 @@ function handleTechniqueClick(box) {
-T0149.005 Server
+T0149.005 Server Asset
@@ -1179,7 +1179,7 @@ function handleTechniqueClick(box) {
-T0149.006 IP Address
+T0149.006 IP Address Asset
@@ -1197,7 +1197,7 @@ function handleTechniqueClick(box) {
-T0149.007 VPN
+T0149.007 VPN Asset
@@ -1215,7 +1215,7 @@ function handleTechniqueClick(box) {
-T0149.008 Proxy IP Address
+T0149.008 Proxy IP Address Asset
@@ -1269,7 +1269,7 @@ function handleTechniqueClick(box) {
-T0150.001 Newly Created
+T0150.001 Newly Created Asset
@@ -1287,7 +1287,7 @@ function handleTechniqueClick(box) {
-T0150.002 Dormant
+T0150.002 Dormant Asset
@@ -1305,7 +1305,7 @@ function handleTechniqueClick(box) {
-T0150.003 Pre-Existing
+T0150.003 Pre-Existing Asset
@@ -1323,7 +1323,7 @@ function handleTechniqueClick(box) {
-T0150.004 Repurposed
+T0150.004 Repurposed Asset
@@ -1341,7 +1341,7 @@ function handleTechniqueClick(box) {
-T0150.005 Compromised
+T0150.005 Compromised Asset
@@ -1359,7 +1359,7 @@ function handleTechniqueClick(box) {
-T0150.006 Purchased
+T0150.006 Purchased Asset
@@ -1377,7 +1377,7 @@ function handleTechniqueClick(box) {
-T0150.007 Rented
+T0150.007 Rented Asset
@@ -1395,7 +1395,7 @@ function handleTechniqueClick(box) {
-T0150.008 Bulk Created
+T0150.008 Bulk Created Asset
@@ -1703,7 +1703,7 @@ function handleTechniqueClick(box) {
@@ -1713,7 +1713,7 @@ function handleTechniqueClick(box) {
@@ -1726,23 +1726,23 @@ function handleTechniqueClick(box) {
@@ -1752,59 +1752,59 @@ function handleTechniqueClick(box) {
diff --git a/generated_pages/disarm_red_framework.md b/generated_pages/disarm_red_framework.md
index 5a550b3..1866503 100644
--- a/generated_pages/disarm_red_framework.md
+++ b/generated_pages/disarm_red_framework.md
@@ -529,7 +529,7 @@
 T0137.006 Manipulate Stocks
 T0088.001 Develop AI-Generated Audio (Deepfakes)
-T0152.002 Blog
+T0152.002 Blog Asset
@@ -565,7 +565,7 @@
 T0138.001 Encourage
 T0089 Obtain Private Documents
-T0152.004 Website
+T0152.004 Website Asset
@@ -618,7 +618,7 @@
 T0139 Dissuade from Acting
-T0146 Account
+T0146 Account Asset
 T0152.007 Audio Platform
@@ -636,7 +636,7 @@
 T0139.001 Discourage
-T0146.001 Free Account
+T0146.001 Free Account Asset
 T0152.008 Live Streaming Platform
@@ -654,7 +654,7 @@
 T0139.002 Silence
-T0146.002 Paid Account
+T0146.002 Paid Account Asset
 T0152.009 Software Delivery Platform
@@ -672,7 +672,7 @@
 T0139.003 Deter
-T0146.003 Verified Account
+T0146.003 Verified Account Asset
 T0152.010 File Hosting Platform
@@ -690,7 +690,7 @@
 T0140 Cause Harm
-T0146.004 Administrator Account
+T0146.004 Administrator Account Asset
 T0152.011 Wiki Platform
@@ -744,7 +744,7 @@
 T0140.003 Spread Hate
-T0146.007 Automated Account
+T0146.007 Automated Account Asset
 T0153.001 Email Platform
@@ -762,7 +762,7 @@
-T0147 Software
+T0147 Software Asset
 T0153.002 Link Shortening Platform
@@ -780,8 +780,8 @@
-T0147.001 Game
-T0153.003 Shortened Link
+T0147.001 Game Asset
+T0153.003 Shortened Link Asset
@@ -798,8 +798,8 @@
-T0147.002 Game Mod
-T0153.004 QR Code
+T0147.002 Game Mod Asset
+T0153.004 QR Code Asset
@@ -816,7 +816,7 @@
-T0147.003 Malware
+T0147.003 Malware Asset
 T0153.005 Online Advertising Platform
@@ -834,7 +834,7 @@
-T0147.004 Mobile App
+T0147.004 Mobile App Asset
 T0153.006 Content Recommendation Algorithm
@@ -888,7 +888,7 @@
-T0148.002 Bank Account
+T0148.002 Bank Account Asset
 T0154.001 AI LLM Platform
@@ -943,7 +943,7 @@
 T0148.005 Subscription Processing Capability
-T0155.001 Password Gated
+T0155.001 Password Gated Asset
@@ -961,7 +961,7 @@
 T0148.006 Crowdfunding Platform
-T0155.002 Invite Gated
+T0155.002 Invite Gated Asset
@@ -979,7 +979,7 @@
 T0148.007 eCommerce Platform
-T0155.003 Approval Gated
+T0155.003 Approval Gated Asset
@@ -997,7 +997,7 @@
 T0148.008 Cryptocurrency Exchange Platform
-T0155.004 Geoblocked
+T0155.004 Geoblocked Asset
@@ -1015,7 +1015,7 @@
 T0148.009 Cryptocurrency Wallet
-T0155.005 Paid Access
+T0155.005 Paid Access Asset
@@ -1033,7 +1033,7 @@
 T0149 Online Infrastructure
-T0155.006 Subscription Access
+T0155.006 Subscription Access Asset
@@ -1050,7 +1050,7 @@
-T0149.001 Domain
+T0149.001 Domain Asset
 T0155.007 Encrypted Communication Channel
@@ -1068,7 +1068,7 @@
-T0149.002 Email Domain
+T0149.002 Email Domain Asset
@@ -1104,7 +1104,7 @@
-T0149.004 Redirecting Domain
+T0149.004 Redirecting Domain Asset
@@ -1122,7 +1122,7 @@
-T0149.005 Server
+T0149.005 Server Asset
@@ -1140,7 +1140,7 @@
-T0149.006 IP Address
+T0149.006 IP Address Asset
@@ -1158,7 +1158,7 @@
-T0149.007 VPN
+T0149.007 VPN Asset
@@ -1176,7 +1176,7 @@
-T0149.008 Proxy IP Address
+T0149.008 Proxy IP Address Asset
@@ -1230,7 +1230,7 @@
-T0150.001 Newly Created
+T0150.001 Newly Created Asset
@@ -1248,7 +1248,7 @@
-T0150.002 Dormant
+T0150.002 Dormant Asset
@@ -1266,7 +1266,7 @@
-T0150.003 Pre-Existing
+T0150.003 Pre-Existing Asset
@@ -1284,7 +1284,7 @@
-T0150.004 Repurposed
+T0150.004 Repurposed Asset
@@ -1302,7 +1302,7 @@
-T0150.005 Compromised
+T0150.005 Compromised Asset
@@ -1320,7 +1320,7 @@
-T0150.006 Purchased
+T0150.006 Purchased Asset
@@ -1338,7 +1338,7 @@
-T0150.007 Rented
+T0150.007 Rented Asset
@@ -1356,7 +1356,7 @@
-T0150.008 Bulk Created
+T0150.008 Bulk Created Asset
diff --git a/generated_pages/incidents/I00002.md b/generated_pages/incidents/I00002.md
index b5e2b64..4a55b43 100644
--- a/generated_pages/incidents/I00002.md
+++ b/generated_pages/incidents/I00002.md
@@ -20,9 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0017 Conduct Fundraising](../../generated_pages/techniques/T0017.md) | IT00000002 Promote "funding" campaign |
-| [T0018 Purchase Targeted Advertisements](../../generated_pages/techniques/T0018.md) | IT00000001 buy FB targeted ads |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000005 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00005.md b/generated_pages/incidents/I00005.md
index 08ce95b..4032cab 100644
--- a/generated_pages/incidents/I00005.md
+++ b/generated_pages/incidents/I00005.md
@@ -29,11 +29,6 @@ The report adds that although officially the Russian government asserted its neu
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000016 cultivate, manipulate, exploit useful idiots |
-| [T0018 Purchase Targeted Advertisements](../../generated_pages/techniques/T0018.md) | IT00000010 Targeted FB paid ads |
-| [T0029 Online Polls](../../generated_pages/techniques/T0029.md) | IT00000013 manipulate social media "online polls"? |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000022 SEO optimisation/manipulation ("key words") |
-| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000012 Digital to physical "organize+promote" rallies & events? |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00006.md b/generated_pages/incidents/I00006.md
index 5c5bbe8..0afef53 100644
--- a/generated_pages/incidents/I00006.md
+++ b/generated_pages/incidents/I00006.md
@@ -20,9 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0015 Create Hashtags and Search Artefacts](../../generated_pages/techniques/T0015.md) | IT00000027 Create and use hashtag |
-| [T0039 Bait Influencer](../../generated_pages/techniques/T0039.md) | IT00000030 bait journalists/media/politicians |
-| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000025 Use SMS/text messages |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00007.md b/generated_pages/incidents/I00007.md
index 10bf62c..d3ddc71 100644
--- a/generated_pages/incidents/I00007.md
+++ b/generated_pages/incidents/I00007.md
@@ -20,7 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000033 cultivate, manipulate, exploit useful idiots (in the case Paul Manafort) |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00009.md b/generated_pages/incidents/I00009.md
index 421bb04..472b340 100644
--- a/generated_pages/incidents/I00009.md
+++ b/generated_pages/incidents/I00009.md
@@ -20,7 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0045 Use Fake Experts](../../generated_pages/techniques/T0045.md) | IT00000036 Using "expert" |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00010.md b/generated_pages/incidents/I00010.md
index cdeb5c1..17f3727 100644
--- a/generated_pages/incidents/I00010.md
+++ b/generated_pages/incidents/I00010.md
@@ -20,10 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000044 cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories; false flags, crisis actors) |
-| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000048 4Chan/8Chan - trial content |
-| [T0039 Bait Influencer](../../generated_pages/techniques/T0039.md) | IT00000049 journalist/media baiting |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000043 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00015.md b/generated_pages/incidents/I00015.md
index ad6561d..588edca 100644
--- a/generated_pages/incidents/I00015.md
+++ b/generated_pages/incidents/I00015.md
@@ -20,8 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0039 Bait Influencer](../../generated_pages/techniques/T0039.md) | IT00000053 journalist/media baiting |
-| [T0044 Seed Distortions](../../generated_pages/techniques/T0044.md) | IT00000052 Circulate to media via DM, then release publicly |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00017.md b/generated_pages/incidents/I00017.md
index 09e1ae5..0e10503 100644
--- a/generated_pages/incidents/I00017.md
+++ b/generated_pages/incidents/I00017.md
@@ -20,13 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000063 cultivate, manipulate, exploit useful idiots |
-| [T0016 Create Clickbait](../../generated_pages/techniques/T0016.md) | IT00000073 Click-bait (economic actors) fake news sites (ie: Denver Guardian; Macedonian teens) |
-| [T0018 Purchase Targeted Advertisements](../../generated_pages/techniques/T0018.md) | IT00000057 Targeted FB paid ads |
-| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000070 4Chan/8Chan - trial content |
-| [T0029 Online Polls](../../generated_pages/techniques/T0029.md) | IT00000060 manipulate social media "online polls"? |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000071 SEO optimisation/manipulation ("key words") |
-| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000059 Digital to physical "organize+promote" rallies & events |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00029.md b/generated_pages/incidents/I00029.md
index f519da6..640ce0f 100644
--- a/generated_pages/incidents/I00029.md
+++ b/generated_pages/incidents/I00029.md
@@ -20,9 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000084 cultivate, manipulate, exploit useful idiots |
-| [T0040 Demand Insurmountable Proof](../../generated_pages/techniques/T0040.md) | IT00000089 Demand insurmountable proof |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000085 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00032.md b/generated_pages/incidents/I00032.md
index 0348fcb..ade83de 100644
--- a/generated_pages/incidents/I00032.md
+++ b/generated_pages/incidents/I00032.md
@@ -20,10 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000104 cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) |
-| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000102 4Chan/8Chan - trial content |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000103 SEO optimisation/manipulation ("key words") |
-| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000093 Digital to physical "organize+promote" rallies & events? |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00033.md b/generated_pages/incidents/I00033.md
index ffbfcaf..f8a1a4a 100644
--- a/generated_pages/incidents/I00033.md
+++ b/generated_pages/incidents/I00033.md
@@ -20,10 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0002 Facilitate State Propaganda](../../generated_pages/techniques/T0002.md) | IT00000110 facilitate state propaganda and defuse crises |
-| [T0047 Censor Social Media as a Political Force](../../generated_pages/techniques/T0047.md) | IT00000108 cow online opinion leaders into submission, muzzling social media as a political force |
-| [T0048 Harass](../../generated_pages/techniques/T0048.md) | IT00000109 cow online opinion leaders into submission, muzzling social media as a political force |
-| [T0049 Flood Information Space](../../generated_pages/techniques/T0049.md) | IT00000105 2,000,000 people (est.) part of state run/sponsored astroturfing |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00034.md b/generated_pages/incidents/I00034.md
index 48b10ef..85fc964 100644
--- a/generated_pages/incidents/I00034.md
+++ b/generated_pages/incidents/I00034.md
@@ -22,9 +22,6 @@ Unique for taking place outside of the Chinese Internet system, both transgressi
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0002 Facilitate State Propaganda](../../generated_pages/techniques/T0002.md) | IT00000111 Netizens from one of the largest discussion forums in China, known as Diba, coordinated to overcome China’s Great Firewall |
-| [T0049 Flood Information Space](../../generated_pages/techniques/T0049.md) | IT00000112 flood the Facebook pages of Taiwanese politicians and news agencies with a pro-PRC message |
-| [T0049 Flood Information Space](../../generated_pages/techniques/T0049.md) | IT00000113 Democratic Progressive Party (DPP), attracted nearly 40,000 Facebook comments in just eight hours. |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00044.md b/generated_pages/incidents/I00044.md
index 2355ec9..0ae2de6 100644
--- a/generated_pages/incidents/I00044.md
+++ b/generated_pages/incidents/I00044.md
@@ -20,9 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000126 cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) |
-| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000124 4Chan/8Chan - trial content |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000125 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00047.md b/generated_pages/incidents/I00047.md
index 846b75f..5776246 100644
--- a/generated_pages/incidents/I00047.md
+++ b/generated_pages/incidents/I00047.md
@@ -21,8 +21,6 @@ The Russian Federal Security Service (FSB), which is also responsible for border
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0023 Distort Facts](../../generated_pages/techniques/T0023.md) | IT00000130 (Distort) Kremlin-controlled RT cited Russian Minister of Foreign Affairs Sergei Lavrov suggesting that Ukraine deliberately provoked Russia in hopes of gaining additional support from the United States and Europe. |
-| [T0040 Demand Insurmountable Proof](../../generated_pages/techniques/T0040.md) | IT00000133 Demand insurmountable proof |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00049.md b/generated_pages/incidents/I00049.md
index 90d0ed7..cc299ef 100644
--- a/generated_pages/incidents/I00049.md
+++ b/generated_pages/incidents/I00049.md
@@ -20,8 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000139 cultivate, manipulate, exploit useful idiots (Roger Waters; Venessa Beeley...) |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000140 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00050.md b/generated_pages/incidents/I00050.md
index 3854b69..cce31d1 100644
--- a/generated_pages/incidents/I00050.md
+++ b/generated_pages/incidents/I00050.md
@@ -23,8 +23,6 @@ Maduro has remained defiant in the face of domestic and international pressure,
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000148 cultivate, manipulate, exploit useful idiots (Roger Waters) |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000149 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00051.md b/generated_pages/incidents/I00051.md
index 8532ac6..c2df8d1 100644
--- a/generated_pages/incidents/I00051.md
+++ b/generated_pages/incidents/I00051.md
@@ -21,8 +21,6 @@ The FCO comments on the IfS were issued after a news report said the group had r
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000158 cultivate, manipulate, exploit useful idiots |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000161 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00053.md b/generated_pages/incidents/I00053.md
index 074136f..e0b4f05 100644
--- a/generated_pages/incidents/I00053.md
+++ b/generated_pages/incidents/I00053.md
@@ -21,9 +21,6 @@ Geopolitically complex issue combines US/China trade; Security concerns/issues r
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0023 Distort Facts](../../generated_pages/techniques/T0023.md) | IT00000163 Distorted, saccharine “news” about the Chinese State and Party |
-| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000164 Events coordinated and promoted across media platforms |
-| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000166 Extend digital the physical space… gatherings ie: support for Meng outside courthouse |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00056.md b/generated_pages/incidents/I00056.md
index 718ee62..19a40fa 100644
--- a/generated_pages/incidents/I00056.md
+++ b/generated_pages/incidents/I00056.md
@@ -21,8 +21,6 @@ While there is history to Iran’s information/influence operations, starting wi
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0022 Leverage Conspiracy Theory Narratives](../../generated_pages/techniques/T0022.md) | IT00000174 Memes... anti-Isreal/USA/West, conspiracy narratives |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000172 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00063.md b/generated_pages/incidents/I00063.md
index 2d56fd0..b876196 100644
--- a/generated_pages/incidents/I00063.md
+++ b/generated_pages/incidents/I00063.md
@@ -20,8 +20,6 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000182 cultivate, manipulate, exploit useful idiots |
-| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000183 SEO optimisation/manipulation ("key words") |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00064.md b/generated_pages/incidents/I00064.md
index 72d189b..c1febc4 100644
--- a/generated_pages/incidents/I00064.md
+++ b/generated_pages/incidents/I00064.md
@@ -21,8 +21,9 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000242 “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br>“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”<br><br><br>In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.007: Rented, T0151.017: Dating Platform). |
-| [T0150.007 Rented](../../generated_pages/techniques/T0150.007.md) | IT00000240 “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br>“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”<br><br><br>In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.007: Rented, T0151.017: Dating Platform). |
+| [T0097.109 Romantic Suitor Persona](../../generated_pages/techniques/T0097.109.md) | IT00000241 “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br>“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”<br><br><br>In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). |
+| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000242 “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br>“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”<br><br><br>In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). |
+| [T0150.007 Rented Asset](../../generated_pages/techniques/T0150.007.md) | IT00000240 “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br>“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”<br><br><br>In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). |
 | [T0151.017 Dating Platform](../../generated_pages/techniques/T0151.017.md) | IT00000214 _"In the days leading up to the UK’s [2017] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes._<br><br>_"Tinder is a dating app where users swipe right to indicate attraction and interest in a potential partner. If both people swipe right on each other’s profile, a dialogue box becomes available for them to privately chat. After meeting their crowdfunding goal of only £500, the team built a tool which took over and operated the accounts of recruited Tinder-users. By upgrading the profiles to Tinder Premium, the team was able to place bots in any contested constituency across the UK. Once planted, the bots swiped right on all users in the attempt to get the largest number of matches and inquire into their voting intentions."_<br><br>This incident matches T0151.017: Dating Platform, as users of Tinder were targeted in an attempt to persuade users to vote for a particular party in the upcoming election, rather than for the purpose of connecting those who were authentically interested in dating each other. |
diff --git a/generated_pages/incidents/I00065.md b/generated_pages/incidents/I00065.md
index b3a1fc4..e903849 100644
--- a/generated_pages/incidents/I00065.md
+++ b/generated_pages/incidents/I00065.md
@@ -21,7 +21,7 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) | IT00000215 _”Overall, narratives promoted in the five operations appear to represent a concerted effort to discredit the ruling political coalition, widen existing domestic political divisions and project an image of coalition disunity in Poland. In each incident, content was primarily disseminated via Twitter, Facebook, and/ or Instagram accounts belonging to Polish politicians, all of whom have publicly claimed their accounts were compromised at the times the posts were made."_<br><br>This example demonstrates how threat actors can use compromised accounts to distribute inauthentic content while exploiting the legitimate account holder’s persona (T0097.110: Party Official Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.005: Compromised). |
+| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) | IT00000215 _”Overall, narratives promoted in the five operations appear to represent a concerted effort to discredit the ruling political coalition, widen existing domestic political divisions and project an image of coalition disunity in Poland. In each incident, content was primarily disseminated via Twitter, Facebook, and/ or Instagram accounts belonging to Polish politicians, all of whom have publicly claimed their accounts were compromised at the times the posts were made."_<br><br>This example demonstrates how threat actors can use compromised accounts to distribute inauthentic content while exploiting the legitimate account holder’s persona (T0097.110: Party Official Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.005: Compromised Asset). |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00066.md b/generated_pages/incidents/I00066.md
index 5c24329..e8b16f9 100644
--- a/generated_pages/incidents/I00066.md
+++ b/generated_pages/incidents/I00066.md
@@ -21,7 +21,7 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
-| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) | IT00000216 _"In the early hours of 24 May 2017, a news story appeared on the website of Qatar's official news agency, QNA, reporting that the country's emir, Sheikh Tamim bin Hamad al-Thani, had made an astonishing speech."_<br><br>_"[…]_<br><br>_"Qatar claimed that the QNA had been hacked. And they said the hack was designed to deliberately spread fake news about the country's leader and its foreign policies. The Qataris specifically blamed UAE, an allegation later repeated by a Washington Post report which cited US intelligence sources. The UAE categorically denied those reports._<br><br>_"But the story of the emir's speech unleashed a media free-for-all. Within minutes, Saudi and UAE-owned TV networks - Al Arabiya and Sky News Arabia - picked up on the comments attributed to al-Thani. Both networks accused Qatar of funding extremist groups and of destabilising the region."_<br><br>This incident demonstrates how threat actors used a compromised website to allow for an inauthentic narrative to be given a level of credibility which caused significant political fallout (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.004: Website, T0150.005: Compromised). |
+| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000216 _"In the early hours of 24 May 2017, a news story appeared on the website of Qatar's official news agency, QNA, reporting that the country's emir, Sheikh Tamim bin Hamad al-Thani, had made an astonishing speech."_<br><br>_"[…]_<br><br>_"Qatar claimed that the QNA had been hacked. And they said the hack was designed to deliberately spread fake news about the country's leader and its foreign policies. The Qataris specifically blamed UAE, an allegation later repeated by a Washington Post report which cited US intelligence sources. The UAE categorically denied those reports._<br><br>_"But the story of the emir's speech unleashed a media free-for-all. Within minutes, Saudi and UAE-owned TV networks - Al Arabiya and Sky News Arabia - picked up on the comments attributed to al-Thani. Both networks accused Qatar of funding extremist groups and of destabilising the region."_<br><br>This incident demonstrates how threat actors used a compromised website to allow for an inauthentic narrative to be given a level of credibility which caused significant political fallout (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.004: Website Asset, T0150.005: Compromised Asset). |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00068.md b/generated_pages/incidents/I00068.md
index 2d3519e..468bdc2 100644
--- a/generated_pages/incidents/I00068.md
+++ b/generated_pages/incidents/I00068.md
@@ -23,8 +23,9 @@
 | --------- | ------------------------- |
 | [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) | IT00000220 “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”<br><br>In this example attackers impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona), targeting one of its employees over WhatsApp (T0151.004: Chat Platform,T0155.007: Encrypted Communication Channel) using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
 | [T0097.100 Individual Persona](../../generated_pages/techniques/T0097.100.md) | IT00000221 “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”<br><br>In this example attackers impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona), targeting one of its employees over WhatsApp (T0151.004: Chat Platform,T0155.007: Encrypted Communication Channel) using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
-| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000222 “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”<br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
-| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000219 “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”<br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
+| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000222 “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”<br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
+| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000219 “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”<br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
+| [T0155.007 Encrypted Communication Channel](../../generated_pages/techniques/T0155.007.md) |  IT00000547 “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”<br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
 DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
\ No newline at end of file
diff --git a/generated_pages/incidents/I00071.md b/generated_pages/incidents/I00071.md
index 5724d6e..bacc43e 100644
--- a/generated_pages/incidents/I00071.md
+++ b/generated_pages/incidents/I00071.md
@@ -22,12 +22,12 @@
 | Technique | Description given for this incident |
 | --------- | ------------------------- |
 | [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) | IT00000324 “On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.<br><br>[...]<br><br>The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”<br><br><br>In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). |
-| [T0097.101 Local Persona](../../generated_pages/techniques/T0097.101.md) | IT00000238 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br>“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br>“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br>“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br>“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”<br><br><br>In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).<br><br>This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
-| [T0097.108 Expert Persona](../../generated_pages/techniques/T0097.108.md) | IT00000239 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br>“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br>“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br>“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br>“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”<br><br><br>In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).<br><br>This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
+| [T0097.101 Local Persona](../../generated_pages/techniques/T0097.101.md) | IT00000238 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br>“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br>“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br>“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br>“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”<br><br><br>In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).<br><br>This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
+| [T0097.108 Expert Persona](../../generated_pages/techniques/T0097.108.md) | IT00000239 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | | [T0097.111 Government Official Persona](../../generated_pages/techniques/T0097.111.md) | IT00000327 “On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.

[...]

The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”


In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). | | [T0097.206 Government Institution Persona](../../generated_pages/techniques/T0097.206.md) | IT00000326 “On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.

[...]

The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”


In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). | -| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000237 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | -| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) | IT00000236 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | +| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000237 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | +| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000236 “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00072.md b/generated_pages/incidents/I00072.md index 8fb9b28..148899e 100644 --- a/generated_pages/incidents/I00072.md +++ b/generated_pages/incidents/I00072.md @@ -21,10 +21,10 @@ | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0097.204 Think Tank Persona](../../generated_pages/techniques/T0097.204.md) | IT00000244 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). | -| [T0149.004 Redirecting Domain](../../generated_pages/techniques/T0149.004.md) | IT00000243 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). | -| [T0150.004 Repurposed](../../generated_pages/techniques/T0150.004.md) | IT00000345 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). | -| [T0155.004 Geoblocked](../../generated_pages/techniques/T0155.004.md) | IT00000346 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). | +| [T0097.204 Think Tank Persona](../../generated_pages/techniques/T0097.204.md) | IT00000244 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example, a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). | +| [T0149.004 Redirecting Domain Asset](../../generated_pages/techniques/T0149.004.md) | IT00000243 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example, a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). | +| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) | IT00000345 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example, a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). | +| [T0155.004 Geoblocked Asset](../../generated_pages/techniques/T0155.004.md) | IT00000346 “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example, a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00074.md b/generated_pages/incidents/I00074.md index 168031b..ac55031 100644 --- a/generated_pages/incidents/I00074.md +++ b/generated_pages/incidents/I00074.md @@ -24,7 +24,7 @@ | [T0097.106 Recruiter Persona](../../generated_pages/techniques/T0097.106.md) | IT00000248 “A few press investigations have alluded to the [Russia’s Internet Research Agency]’s job ads. The extent of the human asset recruitment strategy is revealed in the organic data set. It is expansive, and was clearly a priority. Posts encouraging Americans to perform various types of tasks for IRA handlers appeared in Black, Left, and Right-targeted groups, though they were most numerous in the Black community. They included:

- Requests for contact with preachers from Black churches (Black_Baptist_Church)
- Offers of free counselling to people with sexual addiction (Army of Jesus)
- Soliciting volunteers to hand out fliers
- Soliciting volunteers to teach self-defense classes
- Offering free self-defense classes (Black Fist/Fit Black)
- Requests for followers to attend political rallies
- Requests for photographers to document protests
- Requests for speakers at protests
- Requests to protest the Westborough Baptist Church (LGBT United)
- Job offers for designers to help design fliers, sites, Facebook sticker packs
- Requests for female followers to send photos for a calendar
- Requests for followers to send photos to be shared to the Page (Back the Badge)
- Soliciting videos for a YouTube contest called “Pee on Hillary”
- Encouraging people to apply to be part of a Black reality TV show
- Posting a wide variety of job ads (write for BlackMattersUS and others)
- Requests for lawyers to volunteer to assist with immigration cases”


This behaviour matches T0097.106: Recruiter Persona because the threat actors are presenting tasks for their target audience to complete in the style of a job posting (even though some of the tasks were presented as voluntary / unpaid efforts), including calls for people to attend political rallies (T0126.001: Call to Action to Attend). | | [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000250 “The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”

In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). | | [T0097.204 Think Tank Persona](../../generated_pages/techniques/T0097.204.md) | IT00000245 “[Russia’s Internet Research Agency, the IRA] pushed narratives with longform blog content. They created media properties, websites designed to produce stories that would resonate with those targeted. It appears, based on the data set provided by Alphabet, that the IRA may have also expanded into think tank-style communiques. One such page, previously unattributed to the IRA but included in the Alphabet data, was GI Analytics, a geopolitics blog with an international masthead that included American authors. This page was promoted via AdWords and YouTube videos; it has strong ties to more traditional Russian propaganda networks, which will be discussed later in this analysis. GI Analytics wrote articles articulating nuanced academic positions on a variety of sophisticated topics. From the site’s About page:

““Our purpose and mission are to provide high-quality analysis at a time when we are faced with a multitude of crises, a collapsing global economy, imperialist wars, environmental disasters, corporate greed, terrorism, deceit, GMO food, a migration crisis and a crackdown on small farmers and ranchers.””


In this example Alphabet’s technical indicators allowed them to assert that GI Analytics, which presented itself as a think tank, was a fabricated institution associated with Russia’s Internet Research Agency (T0097.204: Think Tank Persona, T0143.002: Fabricated Persona). | -| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) | IT00000251 “The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”

In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). | +| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) | IT00000251 “The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”

In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). | | [T0126.001 Call to Action to Attend](../../generated_pages/techniques/T0126.001.md) | IT00000247 “A few press investigations have alluded to the [Russia’s Internet Research Agency]’s job ads. The extent of the human asset recruitment strategy is revealed in the organic data set. It is expansive, and was clearly a priority. Posts encouraging Americans to perform various types of tasks for IRA handlers appeared in Black, Left, and Right-targeted groups, though they were most numerous in the Black community. They included:

- Requests for contact with preachers from Black churches (Black_Baptist_Church)
- Offers of free counselling to people with sexual addiction (Army of Jesus)
- Soliciting volunteers to hand out fliers
- Soliciting volunteers to teach self-defense classes
- Offering free self-defense classes (Black Fist/Fit Black)
- Requests for followers to attend political rallies
- Requests for photographers to document protests
- Requests for speakers at protests
- Requests to protest the Westborough Baptist Church (LGBT United)
- Job offers for designers to help design fliers, sites, Facebook sticker packs
- Requests for female followers to send photos for a calendar
- Requests for followers to send photos to be shared to the Page (Back the Badge)
- Soliciting videos for a YouTube contest called “Pee on Hillary”
- Encouraging people to apply to be part of a Black reality TV show
- Posting a wide variety of job ads (write for BlackMattersUS and others)
- Requests for lawyers to volunteer to assist with immigration cases”


This behaviour matches T0097.106: Recruiter Persona because the threat actors are presenting tasks for their target audience to complete in the style of a job posting (even though some of the tasks were presented as voluntary / unpaid efforts), including calls for people to attend political rallies (T0126.001: Call to Action to Attend). | | [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000249 “The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”

In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). | diff --git a/generated_pages/incidents/I00084.md b/generated_pages/incidents/I00084.md index 516bd39..34f65d3 100644 --- a/generated_pages/incidents/I00084.md +++ b/generated_pages/incidents/I00084.md @@ -26,6 +26,7 @@ | [T0131 Exploit TOS/Content Moderation](../../generated_pages/techniques/T0131.md) | IT00000338 “After the European Union banned Kremlin-backed media outlets and social media giants demoted their posts for peddling falsehoods about the war in Ukraine, Moscow has turned to its cadre of diplomats, government spokespeople and ministers — many of whom have extensive followings on social media — to promote disinformation about the conflict in Eastern Europe, according to four EU and United States officials.”

In this example authentic Russian government officials used their own accounts to promote false narratives (T0143.001: Authentic Persona, T0097.111: Government Official Persona).

The use of accounts managed by authentic Government / Diplomats to spread false narratives makes it harder for platforms to enforce content moderation, because of the political ramifications they may face for censoring elected officials (T0131: Exploit TOS/Content Moderation). For example, Twitter previously argued that official channels of world leaders are not removed due to the high public interest associated with their activities. | | [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000336 “After the European Union banned Kremlin-backed media outlets and social media giants demoted their posts for peddling falsehoods about the war in Ukraine, Moscow has turned to its cadre of diplomats, government spokespeople and ministers — many of whom have extensive followings on social media — to promote disinformation about the conflict in Eastern Europe, according to four EU and United States officials.”

In this example authentic Russian government officials used their own accounts to promote false narratives (T0143.001: Authentic Persona, T0097.111: Government Official Persona).

The use of accounts managed by authentic Government / Diplomats to spread false narratives makes it harder for platforms to enforce content moderation, because of the political ramifications they may face for censoring elected officials (T0131: Exploit TOS/Content Moderation). For example, Twitter previously argued that official channels of world leaders are not removed due to the high public interest associated with their activities. | | [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000294 “[Russia’s social media] reach isn't the same as Russian state media, but they are trying to recreate what RT and Sputnik had done," said one EU official involved in tracking Russian disinformation. "It's a coordinated effort that goes beyond social media and involves specific websites."

“Central to that wider online playbook is a Telegram channel called Warfakes and an affiliated website. Since the beginning of the conflict, that social media channel has garnered more than 725,000 members and repeatedly shares alleged fact-checks aimed at debunking Ukrainian narratives, using language similar to Western-style fact-checking outlets.”


In this example, a Telegram channel (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) was established that presented itself as a source of fact checks (T0097.203: Fact Checking Organisation Persona). | +| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) |  IT00000548 “[Russia’s social media] reach isn't the same as Russian state media, but they are trying to recreate what RT and Sputnik had done," said one EU official involved in tracking Russian disinformation. "It's a coordinated effort that goes beyond social media and involves specific websites."

“Central to that wider online playbook is a Telegram channel called Warfakes and an affiliated website. Since the beginning of the conflict, that social media channel has garnered more than 725,000 members and repeatedly shares alleged fact-checks aimed at debunking Ukrainian narratives, using language similar to Western-style fact-checking outlets.”


In this example, a Telegram channel (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) was established that presented itself as a source of fact checks (T0097.203: Fact Checking Organisation Persona). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00089.md index 40772df..24affe2 100644 --- a/generated_pages/incidents/I00089.md +++ b/generated_pages/incidents/I00089.md @@ -21,6 +21,7 @@ | Technique | Description given for this incident | | --------- | ------------------------- | +| [T0097.109 Romantic Suitor Persona](../../generated_pages/techniques/T0097.109.md) | IT00000313 “On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S. citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.

“In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.

“Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.

“In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. 

“In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second, booby-trapped chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””


In this example, threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). | | [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000314 “On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S. citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.

“In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.

“Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.

“In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. 

“In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second, booby-trapped chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””


In this example, threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). | | [T0145.006 Attractive Person Account Imagery](../../generated_pages/techniques/T0145.006.md) | IT00000312 “On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S. citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.

“In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.

“Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.

“In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. 

“In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second, booby-trapped chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””


In this example, threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). | diff --git a/generated_pages/incidents/I00093.md index cc11936..2db8f70 100644 --- a/generated_pages/incidents/I00093.md +++ b/generated_pages/incidents/I00093.md @@ -21,9 +21,9 @@ | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0097.111 Government Official Persona](../../generated_pages/techniques/T0097.111.md) | IT00000343 “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | -| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) | IT00000344 “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | -| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000342 “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | +| [T0097.111 Government Official Persona](../../generated_pages/techniques/T0097.111.md) | IT00000343 “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case, a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | +| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) | IT00000344 “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case, a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | +| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000342 “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case, a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00096.md index 771a66e..f2ca9b5 100644 --- a/generated_pages/incidents/I00096.md +++ b/generated_pages/incidents/I00096.md @@ -15,19 +15,21 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation](https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation) | 2024/04/05 | Alex Scroxton | ComputerWeekly | [https://web.archive.org/web/20240405154259/https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation](https://web.archive.org/web/20240405154259/https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0087.001 Develop AI-Generated Videos (Deepfakes)](../../generated_pages/techniques/T0087.001.md) |  IT00000352 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) |  IT00000347 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) |  IT00000354 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) |  IT00000348 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [T0115 Post Content](../../generated_pages/techniques/T0115.md) |  IT00000350 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000353 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) |  IT00000349 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) |  IT00000351 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0087.001 Develop AI-Generated Videos (Deepfakes)](../../generated_pages/techniques/T0087.001.md) |  IT00000352 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) |  IT00000347 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) |  IT00000354 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) |  IT00000348 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0115 Post Content](../../generated_pages/techniques/T0115.md) |  IT00000350 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000353 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) |  IT00000349 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) |  IT00000351 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Videos (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00097.md index 53cc671..59eb14e 100644 --- a/generated_pages/incidents/I00097.md +++ b/generated_pages/incidents/I00097.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://au.reset.tech/news/report-not-just-algorithms/](https://au.reset.tech/news/report-not-just-algorithms/) | 2024/03/24 | - | Reset Australia | [https://web.archive.org/web/20240527135516/https://au.reset.tech/news/report-not-just-algorithms/](https://web.archive.org/web/20240527135516/https://au.reset.tech/news/report-not-just-algorithms/) | diff --git a/generated_pages/incidents/I00098.md index edbd5bb..664de10 100644 --- a/generated_pages/incidents/I00098.md +++ b/generated_pages/incidents/I00098.md @@ -15,14 +15,15 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf](https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf) | 2023/05/01 | Mariana Olaizola Rosenblat, Paul M. Barrett | NYU Stern | [https://web.archive.org/web/20240912223613/https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf](https://web.archive.org/web/20240912223613/https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0147.001 Game](../../generated_pages/techniques/T0147.001.md) |  IT00000360 This report looks at how extremists exploit games and gaming-adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. | -| [T0147.002 Game Mod](../../generated_pages/techniques/T0147.002.md) |  IT00000362 This report looks at how extremists exploit games and gaming-adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. | -| [T0151.015 Online Game Platform](../../generated_pages/techniques/T0151.015.md) |  IT00000361 This report looks at how extremists exploit games and gaming-adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. | +| [T0147.001 Game Asset](../../generated_pages/techniques/T0147.001.md) |  IT00000360 This report looks at how extremists exploit games and gaming-adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. | +| [T0147.002 Game Mod Asset](../../generated_pages/techniques/T0147.002.md) |  IT00000362 This report looks at how extremists exploit games and gaming-adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. | +| [T0151.015 Online Game Platform](../../generated_pages/techniques/T0151.015.md) |  IT00000361 This report looks at how extremists exploit games and gaming-adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00099.md b/generated_pages/incidents/I00099.md index 115dabd..3bf560a 100644 --- a/generated_pages/incidents/I00099.md +++ b/generated_pages/incidents/I00099.md @@ -15,15 +15,16 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.vogue.co.uk/news/article/stop-deepfakes-campaign](https://www.vogue.co.uk/news/article/stop-deepfakes-campaign) | 2021/03/15 | Sophie Compton | Vogue | [https://web.archive.org/web/20230515171651/https://www.vogue.co.uk/news/article/stop-deepfakes-campaign](https://web.archive.org/web/20230515171651/https://www.vogue.co.uk/news/article/stop-deepfakes-campaign) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) |  IT00000364 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | -| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) |  IT00000365 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | -| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) |  IT00000363 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | -| [T0155.005 Paid Access](../../generated_pages/techniques/T0155.005.md) |  IT00000366 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | +| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) |  IT00000364 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users with the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | +| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) |  IT00000365 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users with the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | +| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) |  IT00000363 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users with the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | +| [T0155.005 Paid Access Asset](../../generated_pages/techniques/T0155.005.md) |  IT00000366 Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users with the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00100.md index d9e33ac..874341a 100644 --- a/generated_pages/incidents/I00100.md +++ b/generated_pages/incidents/I00100.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/](https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/) | 2019/03/03 | Adam Ghahramani | VentureBeat | [https://web.archive.org/web/20240822125555/https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/](https://web.archive.org/web/20240822125555/https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/) | diff --git a/generated_pages/incidents/I00101.md index f3e4a7a..af73bfe 100644 --- a/generated_pages/incidents/I00101.md +++ b/generated_pages/incidents/I00101.md @@ -15,15 +15,16 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/](https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/) | 2023/12/12 | Aliide Naylor | CEPA | [https://web.archive.org/web/20240813063338/https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/](https://web.archive.org/web/20240813063338/https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0124 Suppress Opposition](../../generated_pages/techniques/T0124.md) |  IT00000373 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | -| [T0146.004 Administrator Account](../../generated_pages/techniques/T0146.004.md) |  IT00000372 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | -| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) |  IT00000371 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | -| [T0151.011 Community Sub-Forum](../../generated_pages/techniques/T0151.011.md) |  IT00000370 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [T0124 Suppress Opposition](../../generated_pages/techniques/T0124.md) |  IT00000373 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [T0146.004 Administrator Account Asset](../../generated_pages/techniques/T0146.004.md) |  IT00000372 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) |  IT00000371 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [T0151.011 Community Sub-Forum](../../generated_pages/techniques/T0151.011.md) |  IT00000370 This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators were replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00102.md b/generated_pages/incidents/I00102.md index 1cd87cb..ac142a4 100644 --- a/generated_pages/incidents/I00102.md +++ b/generated_pages/incidents/I00102.md @@ -15,21 +15,22 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/](https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/) | 2019/04/28 | Robert Evans | bellingcat | [https://web.archive.org/web/20240929180218/https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/](https://web.archive.org/web/20240929180218/https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0084.004 Appropriate Content](../../generated_pages/techniques/T0084.004.md) |  IT00000377 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) |  IT00000378 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0101 Create Localised Content](../../generated_pages/techniques/T0101.md) |  IT00000375 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000382 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) |  IT00000383 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0146.006 Open Access Platform](../../generated_pages/techniques/T0146.006.md) |  IT00000376 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000381 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0151.012 Image Board Platform](../../generated_pages/techniques/T0151.012.md) |  IT00000374 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) |  IT00000380 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [T0152.010 File Hosting Platform](../../generated_pages/techniques/T0152.010.md) |  IT00000379 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0084.004 Appropriate Content](../../generated_pages/techniques/T0084.004.md) |  IT00000377 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) |  IT00000378 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0101 Create Localised Content](../../generated_pages/techniques/T0101.md) |  IT00000375 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000382 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) |  IT00000383 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0146.006 Open Access Platform](../../generated_pages/techniques/T0146.006.md) |  IT00000376 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000381 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file hosting platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shooting had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crime committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This sparked significant division and debate among the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0151.012 Image Board Platform](../../generated_pages/techniques/T0151.012.md) |  IT00000374 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” i.e., kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on Pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” [of] merely [having] been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) |  IT00000380 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on Pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” [of] merely [having] been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [T0152.010 File Hosting Platform](../../generated_pages/techniques/T0152.010.md) |  IT00000379 On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on Pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” [of] merely [having] been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00103.md b/generated_pages/incidents/I00103.md index efeeaa6..15bca28 100644 --- a/generated_pages/incidents/I00103.md +++ b/generated_pages/incidents/I00103.md @@ -15,15 +15,16 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.bbc.co.uk/news/articles/ckg9k5dv1zdo](https://www.bbc.co.uk/news/articles/ckg9k5dv1zdo) | 2024/10/05 | Marianna Spring | BBC News | [https://web.archive.org/web/20241009114904/https://www.bbc.com/news/articles/ckg9k5dv1zdo](https://web.archive.org/web/20241009114904/https://www.bbc.com/news/articles/ckg9k5dv1zdo) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) |  IT00000385 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | -| [T0146 Account](../../generated_pages/techniques/T0146.md) |  IT00000387 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | -| [T0149.005 Server](../../generated_pages/techniques/T0149.005.md) |  IT00000384 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | -| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) |  IT00000386 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | +| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) |  IT00000385 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | +| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) |  IT00000387 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | +| [T0149.005 Server Asset](../../generated_pages/techniques/T0149.005.md) |  IT00000384 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | +| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) |  IT00000386 “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00104.md b/generated_pages/incidents/I00104.md index 6a6ee82..e35c139 100644 --- a/generated_pages/incidents/I00104.md +++ b/generated_pages/incidents/I00104.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/](https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/) | 2017/05/05 | AJ Vicens, Pema Levy, Laura Smith | Mother Jones | [https://web.archive.org/web/20240628030952/https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/](https://web.archive.org/web/20240628030952/https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/) | @@ -22,7 +23,7 @@ | --------- | ------------------------- | | [T0115 Post Content](../../generated_pages/techniques/T0115.md) |  IT00000391 A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie [sic] and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on Pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | | [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000392 A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie [sic] and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on Pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | -| [T0146.007 Automated Account](../../generated_pages/techniques/T0146.007.md) |  IT00000389 A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted a to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | +| [T0146.007 Automated Account Asset](../../generated_pages/techniques/T0146.007.md) |  IT00000389 A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie [sic] and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on Pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | | [T0151.012 Image Board Platform](../../generated_pages/techniques/T0151.012.md) |  IT00000388 A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie [sic] and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on Pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | | [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) |  IT00000390 A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie [sic] and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on Pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | diff --git a/generated_pages/incidents/I00105.md b/generated_pages/incidents/I00105.md index ac9e2f0..db6bb1d 100644 --- a/generated_pages/incidents/I00105.md +++ b/generated_pages/incidents/I00105.md @@ -15,18 +15,19 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/](https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/) | 2024/06/10 | Constantin Winkler, Lars Wiegold | Global Network on Extremism & Technology | [https://web.archive.org/web/20241009034732/https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/](https://web.archive.org/web/20241009034732/https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0147.001 Game](../../generated_pages/techniques/T0147.001.md) |  IT00000397 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). | -| [T0147.002 Game Mod](../../generated_pages/techniques/T0147.002.md) |  IT00000398 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod). | -| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) |  IT00000395 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). | -| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) |  IT00000394 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated, T0151.009: Legacy Online Forum Platform). | -| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) |  IT00000396 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). | -| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) |  IT00000399 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod). | -| [T0155.003 Approval Gated](../../generated_pages/techniques/T0155.003.md) |  IT00000393 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable was the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies, and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forums is a legacy online forum, with access gated behind approval by existing platform users (T0155.003: Approval Gated, T0151.009: Legacy Online Forum Platform). | +| [T0147.001 Game Asset](../../generated_pages/techniques/T0147.001.md) |  IT00000397 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). | +| [T0147.002 Game Mod Asset](../../generated_pages/techniques/T0147.002.md) |  IT00000398 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod Asset). | +| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) |  IT00000395 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). | +| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) |  IT00000394 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable was the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies, and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forums is a legacy online forum, with access gated behind approval by existing platform users (T0155.003: Approval Gated Asset, T0151.009: Legacy Online Forum Platform). | +| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) |  IT00000396 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). | +| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) |  IT00000399 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod Asset). | +| [T0155.003 Approval Gated Asset](../../generated_pages/techniques/T0155.003.md) |  IT00000393 In this report, researchers look at online platforms commonly used by people who play videogames, examining how these platforms can contribute to the radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable was the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies, and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forums is a legacy online forum, with access gated behind approval by existing platform users (T0155.003: Approval Gated Asset, T0151.009: Legacy Online Forum Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00106.md index 8642261..22390a9 100644 --- a/generated_pages/incidents/I00106.md +++ b/generated_pages/incidents/I00106.md @@ -15,17 +15,18 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://futurism.com/hurricane-scam-ai-slop](https://futurism.com/hurricane-scam-ai-slop) | 2024/10/01 | Maggie Harrison Dupré | Futurism | [https://web.archive.org/web/20241007213926/https://futurism.com/hurricane-scam-ai-slop](https://web.archive.org/web/20241007213926/https://futurism.com/hurricane-scam-ai-slop) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0068 Respond to Breaking News Event or Active Crisis](../../generated_pages/techniques/T0068.md) |  IT00000403 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) |  IT00000402 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000404 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) |  IT00000405 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000400 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) |  IT00000401 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | +| [T0068 Respond to Breaking News Event or Active Crisis](../../generated_pages/techniques/T0068.md) |  IT00000403 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) |  IT00000402 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000404 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) |  IT00000405 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000400 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) |  IT00000401 As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI generated images, changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00107.md index a682902..ce9f99e 100644 --- a/generated_pages/incidents/I00107.md +++ b/generated_pages/incidents/I00107.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.foreignaffairs.com/russia/lies-russia-tells-itself](https://www.foreignaffairs.com/russia/lies-russia-tells-itself) | 2024/09/30 | Thomas Rid | Foreign Affairs | [https://web.archive.org/web/20241009145602/https://www.foreignaffairs.com/russia/lies-russia-tells-itself](https://web.archive.org/web/20241009145602/https://www.foreignaffairs.com/russia/lies-russia-tells-itself) | diff --git a/generated_pages/incidents/I00108.md index fd5cd8c..90a5a97 100644 --- a/generated_pages/incidents/I00108.md +++ b/generated_pages/incidents/I00108.md @@ -15,19 +15,20 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook](https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook) | 2020/09/11 | Alexandre Alaphilippe, Roman Adamczyk, Gary Machado | EU Disinfo Lab | [https://web.archive.org/web/20240525113912/https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook/](https://web.archive.org/web/20240525113912/https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0092 Build Network](../../generated_pages/techniques/T0092.md) |  IT00000410 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing its racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000415 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing its racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000411 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing its racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) |  IT00000413 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing its racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [T0152.003 Website Hosting Platform](../../generated_pages/techniques/T0152.003.md) |  IT00000416 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) |  IT00000417 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) |  IT00000418 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) |  IT00000412 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | +| [T0092 Build Network](../../generated_pages/techniques/T0092.md) |  IT00000410 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000415 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000411 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) |  IT00000413 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [T0152.003 Website Hosting Platform](../../generated_pages/techniques/T0152.003.md) |  IT00000416 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) |  IT00000417 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) |  IT00000418 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) |  IT00000412 This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for its WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00109.md index f808b1f..c99825e 100644 --- a/generated_pages/incidents/I00109.md +++ b/generated_pages/incidents/I00109.md @@ -15,27 +15,28 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf](https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf) | 2019/09/11 | - | EU Disinfo Lab | [https://web.archive.org/web/20221214105547/https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf](https://web.archive.org/web/20221214105547/https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0061 Sell Merchandise](../../generated_pages/techniques/T0061.md) |  IT00000430 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | -| [T0097.207 NGO Persona](../../generated_pages/techniques/T0097.207.md) |  IT00000431 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | -| [T0146 Account](../../generated_pages/techniques/T0146.md) |  IT00000424 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) |  IT00000421 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) |  IT00000420 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) |  IT00000429 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | -| [T0149.001 Domain](../../generated_pages/techniques/T0149.001.md) |  IT00000428 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | -| [T0149.005 Server](../../generated_pages/techniques/T0149.005.md) |  IT00000433 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | -| [T0149.006 IP Address](../../generated_pages/techniques/T0149.006.md) |  IT00000434 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | -| [T0150.006 Purchased](../../generated_pages/techniques/T0150.006.md) |  IT00000432 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | -| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000427 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000425 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) |  IT00000422 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) |  IT00000419 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) |  IT00000426 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [T0155 Gated Asset](../../generated_pages/techniques/T0155.md) |  IT00000423 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | +| [T0061 Sell Merchandise](../../generated_pages/techniques/T0061.md) |  IT00000430 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | +| [T0097.207 NGO Persona](../../generated_pages/techniques/T0097.207.md) |  IT00000431 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | +| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) |  IT00000424 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) |  IT00000421 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) |  IT00000420 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) |  IT00000429 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.
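
Pivoting on a shared Google AdSense tag works because every page serving ads for the same AdSense account embeds the same ca-pub- publisher ID in its HTML. A minimal sketch of this pivot, assuming the pages are plainly reachable over HTTP and using placeholder domains rather than the infrastructure named above:

```python
# Minimal sketch: fetch each page, extract Google AdSense publisher IDs
# (ca-pub-...), and group domains that share an ID. Shared IDs suggest
# common ownership of the ad revenue, and hence of the sites.
import re
from collections import defaultdict
from urllib.request import urlopen

PUB_ID = re.compile(r"ca-pub-\d{10,16}")

def adsense_ids(url: str) -> set[str]:
    """Return the set of AdSense publisher IDs embedded in a page."""
    html = urlopen(url, timeout=10).read().decode("utf-8", errors="replace")
    return set(PUB_ID.findall(html))

def cluster_by_publisher(urls: list[str]) -> dict[str, set[str]]:
    """Map each publisher ID to the URLs on which it appears."""
    clusters: defaultdict[str, set[str]] = defaultdict(set)
    for url in urls:
        for pub in adsense_ids(url):
            clusters[pub].add(url)
    return dict(clusters)

# Domains sharing a publisher ID are candidates for common ownership:
# cluster_by_publisher(["https://example-a.eu", "https://example-b.eu"])
```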

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people and are hosted on the same private server.
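
The shared-IP observation can be checked the same way: resolve each candidate domain and group by address. A small cluster on a single IP is consistent with one privately run server, though ordinary shared hosting can look identical, so this is supporting evidence rather than proof. A minimal sketch with placeholder domains:

```python
# Minimal sketch: resolve candidate domains and group them by IP address.
# A small set of domains sharing one IP (as with the 20 domains seen on
# the server above) may suggest a single private server.
import socket
from collections import defaultdict

def group_by_ip(domains: list[str]) -> dict[str, list[str]]:
    """Map each resolved IP address to the domains that point at it."""
    groups: defaultdict[str, list[str]] = defaultdict(list)
    for domain in domains:
        try:
            groups[socket.gethostbyname(domain)].append(domain)
        except socket.gaierror:
            pass  # unresolvable domains are skipped
    return dict(groups)

# group_by_ip(["example-a.eu", "example-b.eu", "example-c.eu"])
```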


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased Asset). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | +| [T0149.001 Domain Asset](../../generated_pages/techniques/T0149.001.md) |  IT00000428 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased Asset). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | +| [T0149.005 Server Asset](../../generated_pages/techniques/T0149.005.md) |  IT00000433 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased Asset). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | +| [T0149.006 IP Address Asset](../../generated_pages/techniques/T0149.006.md) |  IT00000434 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased Asset). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | +| [T0150.006 Purchased Asset](../../generated_pages/techniques/T0150.006.md) |  IT00000432 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased Asset). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | +| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000427 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3,000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000425 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3,000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) |  IT00000422 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3,000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) |  IT00000419 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3,000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) |  IT00000426 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3,000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [T0155 Gated Asset](../../generated_pages/techniques/T0155.md) |  IT00000423 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3,000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00110.md b/generated_pages/incidents/I00110.md index 768d655..8c313a4 100644 --- a/generated_pages/incidents/I00110.md +++ b/generated_pages/incidents/I00110.md @@ -15,20 +15,21 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities/](https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities/) | 2024/10/01 | - | EU Disinfo Lab | [https://web.archive.org/web/20240910182937/https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities](https://web.archive.org/web/20240910182937/https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0017 Conduct Fundraising](../../generated_pages/techniques/T0017.md) |  IT00000435 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0085.005 Develop Book](../../generated_pages/techniques/T0085.005.md) | IT00000442 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) |  IT00000436 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000439 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) |  IT00000440 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0146 Account](../../generated_pages/techniques/T0146.md) | IT00000438 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0148.006 Crowdfunding Platform](../../generated_pages/techniques/T0148.006.md) |  IT00000437 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) |  IT00000443 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000441 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0017 Conduct Fundraising](../../generated_pages/techniques/T0017.md) |  IT00000435 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0085.005 Develop Book](../../generated_pages/techniques/T0085.005.md) | IT00000442 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) |  IT00000436 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000439 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) |  IT00000440 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | IT00000438 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0148.006 Crowdfunding Platform](../../generated_pages/techniques/T0148.006.md) |  IT00000437 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) |  IT00000443 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000441 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133,903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00111.md b/generated_pages/incidents/I00111.md index 7880082..52766bd 100644 --- a/generated_pages/incidents/I00111.md +++ b/generated_pages/incidents/I00111.md @@ -15,18 +15,19 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.vice.com/en/article/patreon-is-bankrolling-climate-change-deniers-while-we-all-burn/](https://www.vice.com/en/article/patreon-is-bankrolling-climate-change-deniers-while-we-all-burn/) | 2021/06/30 | David Gilbert | Vice | [-](-) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0085 Develop Text-Based Content](../../generated_pages/techniques/T0085.md) |  IT00000444 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) |  IT00000445 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [T0088 Develop Audio-Based Content](../../generated_pages/techniques/T0088.md) |  IT00000446 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [T0146 Account](../../generated_pages/techniques/T0146.md) |  IT00000447 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [T0151.014 Comments Section](../../generated_pages/techniques/T0151.014.md) |  IT00000449 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) |  IT00000448 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [T0155.006 Subscription Access](../../generated_pages/techniques/T0155.006.md) |  IT00000450 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | +| [T0085 Develop Text-Based Content](../../generated_pages/techniques/T0085.md) |  IT00000444 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) |  IT00000445 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [T0088 Develop Audio-Based Content](../../generated_pages/techniques/T0088.md) |  IT00000446 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) |  IT00000447 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [T0151.014 Comments Section](../../generated_pages/techniques/T0151.014.md) |  IT00000449 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) |  IT00000448 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [T0155.006 Subscription Access Asset](../../generated_pages/techniques/T0155.006.md) |  IT00000450 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00112.md b/generated_pages/incidents/I00112.md index ed385cd..7a1df02 100644 --- a/generated_pages/incidents/I00112.md +++ b/generated_pages/incidents/I00112.md @@ -15,14 +15,15 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/](https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/) | 2020/12/18 | - | EU Disinfo Lab | [https://web.archive.org/web/20201218092320/https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/](https://web.archive.org/web/20201218092320/https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) |  IT00000452 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts go against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.

Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). | -| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) |  IT00000451 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.

Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). | -| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) |  IT00000453 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.

Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). | +| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) |  IT00000452 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.

Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). | +| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) |  IT00000451 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.

Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). | +| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) |  IT00000453 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.

Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00113.md b/generated_pages/incidents/I00113.md index c5d0991..6626ab0 100644 --- a/generated_pages/incidents/I00113.md +++ b/generated_pages/incidents/I00113.md @@ -15,21 +15,22 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf](https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf) | 2021/09/03 | - | Mozilla | [https://web.archive.org/web/20240927031830/https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf](https://web.archive.org/web/20240927031830/https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0121 Manipulate Platform Algorithm](../../generated_pages/techniques/T0121.md) |  IT00000459 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | -| [T0129.005 Coordinate on Encrypted/Closed Networks](../../generated_pages/techniques/T0129.005.md) |  IT00000456 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | -| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) |  IT00000461 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). | -| [T0148.001 Online Banking Platform](../../generated_pages/techniques/T0148.001.md) |  IT00000455 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | -| [T0148.002 Bank Account](../../generated_pages/techniques/T0148.002.md) |  IT00000454 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | -| [T0150.004 Repurposed](../../generated_pages/techniques/T0150.004.md) |  IT00000463 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to Twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). | -| [T0150.007 Rented](../../generated_pages/techniques/T0150.007.md) |  IT00000462 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to Twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). | -| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) |  IT00000458 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | -| [T0151.007 Chat Broadcast Group](../../generated_pages/techniques/T0151.007.md) |  IT00000457 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | -| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000460 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [T0121 Manipulate Platform Algorithm](../../generated_pages/techniques/T0121.md) |  IT00000459 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [T0129.005 Coordinate on Encrypted/Closed Networks](../../generated_pages/techniques/T0129.005.md) |  IT00000456 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) |  IT00000461 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to Twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). | +| [T0148.001 Online Banking Platform](../../generated_pages/techniques/T0148.001.md) |  IT00000455 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [T0148.002 Bank Account Asset](../../generated_pages/techniques/T0148.002.md) |  IT00000454 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) |  IT00000463 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to Twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). | +| [T0150.007 Rented Asset](../../generated_pages/techniques/T0150.007.md) |  IT00000462 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to Twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). | +| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) |  IT00000458 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [T0151.007 Chat Broadcast Group](../../generated_pages/techniques/T0151.007.md) |  IT00000457 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000460 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00114.md b/generated_pages/incidents/I00114.md index 3ccc6a2..91deea6 100644 --- a/generated_pages/incidents/I00114.md +++ b/generated_pages/incidents/I00114.md @@ -15,17 +15,18 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.nbcnews.com/tech/tech-news/facebook-knew-radicalized-users-rcna3581](https://www.nbcnews.com/tech/tech-news/facebook-knew-radicalized-users-rcna3581) | 2021/10/22 | Brandy Zadrozny | NBC News | [https://web.archive.org/web/20211022224129/https://www.nbcnews.com/tech/tech-news/facebook-knew-radicalized-users-rcna3581](https://web.archive.org/web/20211022224129/https://www.nbcnews.com/tech/tech-news/facebook-knew-radicalized-users-rcna3581) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) |  IT00000468 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [T0114 Deliver Ads](../../generated_pages/techniques/T0114.md) |  IT00000469 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000465 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) |  IT00000466 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) |  IT00000470 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) |  IT00000467 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendation systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) |  IT00000468 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendation systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [T0114 Deliver Ads](../../generated_pages/techniques/T0114.md) |  IT00000469 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendation systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000465 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendation systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) |  IT00000466 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendation systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) |  IT00000470 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendation systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) |  IT00000467 This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendation systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
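For readers who want to reuse the tagging above, the sketch below shows one minimal, hypothetical way to represent this incident's technique list as data and to rebuild the relative links these generated pages use. The incident ID I00114 and the dict layout are illustrative assumptions, not the DISARM toolchain's actual schema.

```python
# Hypothetical representation of the technique tagging summarised above.
# The link format mirrors the one used in these generated incident pages.
incident_techniques = {
    "I00114": [  # assumed ID for this QAnon incident page
        ("T0151.001", "Social Media Platform"),
        ("T0151.002", "Online Community Group"),
        ("T0153.006", "Content Recommendation Algorithm"),
        ("T0097.208", "Social Cause Persona"),
        ("T0146", "Account Asset"),
        ("T0114", "Deliver Ads"),
        ("T0153.005", "Online Advertising Platform"),
    ],
}

def technique_link(technique_id: str, name: str) -> str:
    """Rebuild the relative markdown link style used by these pages."""
    return f"[{technique_id} {name}](../../generated_pages/techniques/{technique_id}.md)"

for tid, name in incident_techniques["I00114"]:
    print(technique_link(tid, name))
```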
| DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00115.md b/generated_pages/incidents/I00115.md index d16d52c..9c0f9fd 100644 --- a/generated_pages/incidents/I00115.md +++ b/generated_pages/incidents/I00115.md @@ -15,13 +15,14 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/](https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/) | 2021/10/26 | Will Oremus, Chris Alcantara, Jeremy B. Merrill, Artur Galocha | The Washington Post | [https://web.archive.org/web/20211026142012/https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/](https://web.archive.org/web/20211026142012/https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000471 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
| -| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) |  IT00000472 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
| +| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000471 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
| +| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) |  IT00000472 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
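A toy scoring function can make the weighting described above concrete. Only the five-times weight for reaction emoji comes from the reporting; the linear score, the remaining weights, and the sample posts are invented purely for illustration and do not reproduce Facebook's actual ranking model.

```python
# Minimal sketch of engagement-weighted scoring, under assumed weights.
from dataclasses import dataclass

@dataclass
class Post:
    likes: int
    reactions: int   # emoji reactions, including "angry"
    comments: int
    reshares: int

def engagement_score(post: Post) -> float:
    weights = {
        "likes": 1.0,       # baseline unit
        "reactions": 5.0,   # reaction emoji weighted 5x a like (reported)
        "comments": 15.0,   # hypothetical: comments and replies weighted heavily
        "reshares": 30.0,   # hypothetical
    }
    return (weights["likes"] * post.likes
            + weights["reactions"] * post.reactions
            + weights["comments"] * post.comments
            + weights["reshares"] * post.reshares)

# Posts that provoke reactions and replies outscore quietly "liked" posts,
# which is the dynamic the internal documents describe.
calm = Post(likes=500, reactions=10, comments=20, reshares=5)
outrage = Post(likes=100, reactions=300, comments=150, reshares=40)
print(engagement_score(calm), engagement_score(outrage))  # 1000.0 vs. 5050.0
```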
| DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00116.md b/generated_pages/incidents/I00116.md index 0f644fe..67b9717 100644 --- a/generated_pages/incidents/I00116.md +++ b/generated_pages/incidents/I00116.md @@ -15,19 +15,20 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter](https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter) | 2023/08/27 | Anna Tims | The Guardian | [https://web.archive.org/web/20230827142858/https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter](https://web.archive.org/web/20230827142858/https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0097.205 Business Persona](../../generated_pages/techniques/T0097.205.md) |  IT00000476 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000477 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) |  IT00000478 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [T0146.002 Paid Account](../../generated_pages/techniques/T0146.002.md) |  IT00000473 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) |  IT00000474 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) |  IT00000475 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [T0150.001 Newly Created](../../generated_pages/techniques/T0150.001.md) |  IT00000480 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000479 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | +| [T0097.205 Business Persona](../../generated_pages/techniques/T0097.205.md) |  IT00000476 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000477 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) |  IT00000478 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [T0146.002 Paid Account Asset](../../generated_pages/techniques/T0146.002.md) |  IT00000473 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) |  IT00000474 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) |  IT00000475 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [T0150.001 Newly Created Asset](../../generated_pages/techniques/T0150.001.md) |  IT00000480 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000479 Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00117.md index 30b7e6d..3f8bd1f 100644 --- a/generated_pages/incidents/I00117.md +++ b/generated_pages/incidents/I00117.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/](https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/) | 2019/08/04 | Robert Evans | bellingcat | [https://web.archive.org/web/20190804002224/https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/](https://web.archive.org/web/20190804002224/https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/) | diff --git a/generated_pages/incidents/I00118.md index 60b3d63..8d13a3c 100644 --- a/generated_pages/incidents/I00118.md +++ b/generated_pages/incidents/I00118.md @@ -15,17 +15,18 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/](https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/) | 2023/01/20 | Max Hauptman | Task & Purpose | [https://web.archive.org/web/20241116165206/https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/](https://web.archive.org/web/20241116165206/https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0089.001 Obtain Authentic Documents](../../generated_pages/techniques/T0089.001.md) |  IT00000481 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | -| [T0097.105 Military Personnel Persona](../../generated_pages/techniques/T0097.105.md) |  IT00000483 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | -| [T0115 Post Content](../../generated_pages/techniques/T0115.md) |  IT00000484 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | -| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) |  IT00000485 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | -| [T0146 Account](../../generated_pages/techniques/T0146.md) |  IT00000482 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | -| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) |  IT00000486 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [T0089.001 Obtain Authentic Documents](../../generated_pages/techniques/T0089.001.md) |  IT00000481 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [T0097.105 Military Personnel Persona](../../generated_pages/techniques/T0097.105.md) |  IT00000483 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [T0115 Post Content](../../generated_pages/techniques/T0115.md) |  IT00000484 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) |  IT00000485 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) |  IT00000482 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) |  IT00000486 In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00119.md b/generated_pages/incidents/I00119.md index ea88b84..8278bd7 100644 --- a/generated_pages/incidents/I00119.md +++ b/generated_pages/incidents/I00119.md @@ -15,19 +15,20 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902](https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902) | 2024/09/26 | Kevin Collier | NBC News | [https://web.archive.org/web/20240927013805/https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902](https://web.archive.org/web/20240927013805/https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0089 Obtain Private Documents](../../generated_pages/techniques/T0089.md) |  IT00000489 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [T0097.100 Individual Persona](../../generated_pages/techniques/T0097.100.md) |  IT00000488 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) |  IT00000490 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) |  IT00000491 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [T0150.003 Pre-Existing](../../generated_pages/techniques/T0150.003.md) |  IT00000494 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [T0152.001 Blogging Platform](../../generated_pages/techniques/T0152.001.md) |  IT00000492 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [T0152.002 Blog](../../generated_pages/techniques/T0152.002.md) |  IT00000493 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) |  IT00000487 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | +| [T0089 Obtain Private Documents](../../generated_pages/techniques/T0089.md) |  IT00000489 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [T0097.100 Individual Persona](../../generated_pages/techniques/T0097.100.md) |  IT00000488 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) |  IT00000490 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) |  IT00000491 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [T0150.003 Pre-Existing Asset](../../generated_pages/techniques/T0150.003.md) |  IT00000494 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [T0152.001 Blogging Platform](../../generated_pages/techniques/T0152.001.md) |  IT00000492 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [T0152.002 Blog Asset](../../generated_pages/techniques/T0152.002.md) |  IT00000493 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) |  IT00000487 An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00120.md index 9313338..d99d934 100644 --- a/generated_pages/incidents/I00120.md +++ b/generated_pages/incidents/I00120.md @@ -15,16 +15,17 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [-](-) | 2019/11/21 | Dr Claire Hardaker | - | [https://web.archive.org/web/20220208165928/https://wp.lancs.ac.uk/drclaireh/2019/11/21/factcheckuk-or-fakecheckuk-reinventing-the-political-faction-as-the-impartial-factchecker/](https://web.archive.org/web/20220208165928/https://wp.lancs.ac.uk/drclaireh/2019/11/21/factcheckuk-or-fakecheckuk-reinventing-the-political-faction-as-the-impartial-factchecker/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0097.203 Fact Checking Organisation Persona](../../generated_pages/techniques/T0097.203.md) |  IT00000497 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it

In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | -| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000498 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it

In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | -| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) |  IT00000499 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it

In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | -| [T0150.003 Pre-Existing](../../generated_pages/techniques/T0150.003.md) |  IT00000496 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it

In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | -| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000495 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it

In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [T0097.203 Fact Checking Organisation Persona](../../generated_pages/techniques/T0097.203.md) |  IT00000497 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000498 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) |  IT00000499 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [T0150.003 Pre-Existing Asset](../../generated_pages/techniques/T0150.003.md) |  IT00000496 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000495 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00121.md b/generated_pages/incidents/I00121.md index 89ea431..e633a42 100644 --- a/generated_pages/incidents/I00121.md +++ b/generated_pages/incidents/I00121.md @@ -15,14 +15,15 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf](https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf) | 2024/06/01 | Aleksandra Atanasova, Amaury Lesplingart, Francesco Poldi, Guillaume Kuster | CheckFirst | [https://web.archive.org/web/20240928205427/https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf](https://web.archive.org/web/20240928205427/https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000501 The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | -| [T0146.001 Free Account](../../generated_pages/techniques/T0146.001.md) |  IT00000500 The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | -| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) |  IT00000502 The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | +| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000501 The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | +| [T0146.001 Free Account Asset](../../generated_pages/techniques/T0146.001.md) |  IT00000500 The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | +| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) |  IT00000502 The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.
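The header observation above is checkable with standard tooling. A minimal sketch (not from the report; the filename and regex are illustrative) of pulling originating IPs out of a saved message's Received chain:

```python
# Sketch: inspect the Received chain of a saved email (RFC 5322 file).
# Webmail submissions typically show only provider infrastructure here,
# while desktop clients usually leak the sender's IP in the first hop.
import re
from email import policy
from email.parser import BytesParser

with open("sample.eml", "rb") as fh:  # hypothetical saved message
    msg = BytesParser(policy=policy.default).parse(fh)

received = [str(h) for h in (msg.get_all("Received") or [])]
ips = [ip for hop in received
       for ip in re.findall(r"\[(\d{1,3}(?:\.\d{1,3}){3})\]", hop)]

print("Received hops:", len(received))
print("IPs in Received headers:", ips or "none (consistent with webmail submission)")
```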


In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00122.md b/generated_pages/incidents/I00122.md index 420a15e..62fa80f 100644 --- a/generated_pages/incidents/I00122.md +++ b/generated_pages/incidents/I00122.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf](https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf) | 2021/08/04 | Aoife Gallagher, Ciaran O’Connor, Pierre Vaux, Elise Thomas, Jacob Davey | ISD | [https://web.archive.org/web/20240528231254/https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf](https://web.archive.org/web/20240528231254/https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf) | diff --git a/generated_pages/incidents/I00123.md b/generated_pages/incidents/I00123.md index bca9b88..462c7e8 100644 --- a/generated_pages/incidents/I00123.md +++ b/generated_pages/incidents/I00123.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf](https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf) | 2021/08/02 | Pierre Vaux, Aoife Gallagher, Jacob Davey | ISD | [https://web.archive.org/web/20240912223614/https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf](https://web.archive.org/web/20240912223614/https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf) | diff --git a/generated_pages/incidents/I00124.md b/generated_pages/incidents/I00124.md index 5b8c982..f382952 100644 --- a/generated_pages/incidents/I00124.md +++ b/generated_pages/incidents/I00124.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook](https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook) | 2023/12/01 | Jesper Falkheimer, Elsa Isaksson, James Pamment | Myndigheten för psykologiskt försvar | [https://web.archive.org/web/20240520215840/https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook](https://web.archive.org/web/20240520215840/https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook) | diff --git a/generated_pages/incidents/I00125.md b/generated_pages/incidents/I00125.md index 8a1f123..1645c98 100644 --- a/generated_pages/incidents/I00125.md +++ b/generated_pages/incidents/I00125.md @@ -15,17 +15,17 @@ | Reference | Pub Date | Authors | Org | Archive |
| --------- | -------- | ------- | --- | ------- | +| [https://www.nytimes.com/2015/06/07/magazine/the-agency.html](https://www.nytimes.com/2015/06/07/magazine/the-agency.html) | 2015/06/07 | Adrian Chen | The New York Times | [https://web.archive.org/web/20241007172237/https://www.nytimes.com/2015/06/07/magazine/the-agency.html](https://web.archive.org/web/20241007172237/https://www.nytimes.com/2015/06/07/magazine/the-agency.html) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) |  IT00000524 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | -| [T0097.101 Local Persona](../../generated_pages/techniques/T0097.101.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | -| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000521 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | -| [T0146 Account](../../generated_pages/techniques/T0146.md) |  IT00000520 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | -| [T0150.004 Repurposed](../../generated_pages/techniques/T0150.004.md) |  IT00000523 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | -| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000522 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) |  IT00000524 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) |  IT00000521 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) |  IT00000520 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) |  IT00000523 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000522 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).
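The repurposed-account claim above rests on a location/timezone inconsistency, which can be tested by asking whether an account's posting hours are plausible in its claimed locale. A minimal sketch with hypothetical timestamps:

```python
# Sketch: share of posts falling in local waking hours (08:00-23:00)
# under each claimed timezone. All timestamps are hypothetical.
from datetime import datetime, timedelta, timezone

posts_utc = [datetime(2014, 12, 13, h, 0, tzinfo=timezone.utc)
             for h in (6, 7, 8, 9, 19, 20, 21)]

def waking_share(posts, utc_offset):
    """Fraction of posts made during waking hours for a claimed UTC offset."""
    tz = timezone(timedelta(hours=utc_offset))
    hours = [p.astimezone(tz).hour for p in posts]
    return sum(8 <= h <= 23 for h in hours) / len(hours)

for label, offset in [("Louisiana (CST, UTC-6)", -6), ("Atlanta (EST, UTC-5)", -5)]:
    print(label, round(waking_share(posts_utc, offset), 2))
```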

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00126.md b/generated_pages/incidents/I00126.md index cbee212..3f1c746 100644 --- a/generated_pages/incidents/I00126.md +++ b/generated_pages/incidents/I00126.md @@ -15,18 +15,19 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/](https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/) | 2023/06/28 | Ankur Saini, Charlie Gardner | Volexity | [https://web.archive.org/web/20241009175500/https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/](https://web.archive.org/web/20241009175500/https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) |  IT00000530 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | -| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) |  IT00000525 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | -| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) |  IT00000526 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | -| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) |  IT00000527 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | -| [T0147.003 Malware](../../generated_pages/techniques/T0147.003.md) |  IT00000531 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | -| [T0149.002 Email Domain](../../generated_pages/techniques/T0149.002.md) |  IT00000529 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | -| [T0149.003 Lookalike Domain](../../generated_pages/techniques/T0149.003.md) |  IT00000528 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) |  IT00000530 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | +| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) |  IT00000525 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | +| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) |  IT00000526 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | +| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) |  IT00000527 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | +| [T0147.003 Malware Asset](../../generated_pages/techniques/T0147.003.md) |  IT00000531 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | +| [T0149.002 Email Domain Asset](../../generated_pages/techniques/T0149.002.md) |  IT00000529 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | +| [T0149.003 Lookalike Domain](../../generated_pages/techniques/T0149.003.md) |  IT00000528 The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.
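The delivery pattern described above (a password-protected archive hiding a Windows shortcut) also suggests a simple gateway heuristic. A minimal sketch using the third-party rarfile package, which requires an unrar backend; the attachment name is hypothetical:

```python
# Sketch: flag password-protected RAR attachments and .lnk members.
import rarfile  # third-party: pip install rarfile

rf = rarfile.RarFile("draft_report.rar")  # hypothetical attachment name
names = rf.namelist()  # may be empty if the archive headers themselves are encrypted
suspicious = rf.needs_password() or any(n.lower().endswith(".lnk") for n in names)
print("flag for analyst review:", suspicious)
```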


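The lookalike email domain summarised below can be screened for by comparing a sender's domain against the impersonated outlet's real domain. A minimal sketch with invented placeholder domains, not taken from the incident:

```python
# Sketch: crude lookalike-domain check via string similarity.
from difflib import SequenceMatcher

REAL = "news-outlet.co.il"      # hypothetical legitimate domain
SENDER = "news-outiet.co.il"    # hypothetical lookalike ('l' swapped for 'i')

similarity = SequenceMatcher(None, REAL, SENDER).ratio()
if SENDER != REAL and similarity > 0.85:
    print(f"possible lookalike of {REAL} (similarity {similarity:.2f})")
```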
In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00127.md b/generated_pages/incidents/I00127.md index c53e7f9..4e48e2f 100644 --- a/generated_pages/incidents/I00127.md +++ b/generated_pages/incidents/I00127.md @@ -15,6 +15,7 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops](https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops) | 2024/02/21 | Nate Nelson | DarkReading | [https://web.archive.org/web/20240221113558/https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops](https://web.archive.org/web/20240221113558/https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops) | diff --git a/generated_pages/incidents/I00128.md b/generated_pages/incidents/I00128.md index ab6abc5..4163e10 100644 --- a/generated_pages/incidents/I00128.md +++ b/generated_pages/incidents/I00128.md @@ -15,17 +15,18 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d](https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d) | 2019/01/31 | Kanishk Karan, Donara Barojan, Melissa Hall, Graham Brookie | DFRLab | [https://web.archive.org/web/20240618014243/https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d](https://web.archive.org/web/20240618014243/https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) |  IT00000534 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | -| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000537 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | -| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000535 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | -| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) |  IT00000536 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | -| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) |  IT00000539 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | -| [T0153.004 QR Code](../../generated_pages/techniques/T0153.004.md) |  IT00000538 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | +| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) |  IT00000534 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | +| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) |  IT00000537 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | +| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) |  IT00000535 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | +| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) |  IT00000536 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | +| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) |  IT00000539 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | +| [T0153.004 QR Code Asset](../../generated_pages/techniques/T0153.004.md) |  IT00000538 [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.
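As a practical aside for analysts verifying this kind of behaviour: a minimal sketch of how the URL behind an embedded QR code can be recovered from a saved copy of the post image, assuming the opencv-python package is available (the filename below is hypothetical).

```python
import cv2  # pip install opencv-python

def qr_payload(image_path: str) -> str:
    """Return the decoded QR payload (e.g. a URL) from a saved post image,
    or an empty string if no QR code is detected."""
    image = cv2.imread(image_path)
    if image is None:
        raise FileNotFoundError(image_path)
    payload, _points, _raw = cv2.QRCodeDetector().detectAndDecode(image)
    return payload

# Hypothetical usage: qr_payload("stopmek_post.png") could return the
# address of the externally hosted document the page directed users to.
```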


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/incidents/I00129.md b/generated_pages/incidents/I00129.md index 2e1d3d6..bf3399f 100644 --- a/generated_pages/incidents/I00129.md +++ b/generated_pages/incidents/I00129.md @@ -15,18 +15,18 @@ | Reference | Pub Date | Authors | Org | Archive | | --------- | -------- | ------- | --- | ------- | +| [https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack](https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack) | 2021/03/16 | Kari Paul | The Guardian | [https://web.archive.org/web/20240920230915/https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack](https://web.archive.org/web/20240920230915/https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack) | | Technique | Description given for this incident | | --------- | ------------------------- | -| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) |  IT00000544 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | -| [T0146.004 Administrator Account](../../generated_pages/techniques/T0146.004.md) |  IT00000543 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | -| [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) |  IT00000540 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | -| [T0148.009 Cryptocurrency Wallet](../../generated_pages/techniques/T0148.009.md) |  IT00000546 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | -| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) |  IT00000541 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | -| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) |  IT00000545 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | -| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000542 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) |  IT00000544 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [T0146.004 Administrator Account Asset](../../generated_pages/techniques/T0146.004.md) |  IT00000543 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [T0148.009 Cryptocurrency Wallet](../../generated_pages/techniques/T0148.009.md) |  IT00000546 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) |  IT00000541 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) |  IT00000545 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) |  IT00000542 An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW \ No newline at end of file diff --git a/generated_pages/tactics/TA06.md index 06fa0ed..a9f7417 100644 --- a/generated_pages/tactics/TA06.md +++ b/generated_pages/tactics/TA06.md @@ -49,22 +49,22 @@ | [T0089 Obtain Private Documents](../../generated_pages/techniques/T0089.md) | | [T0089.001 Obtain Authentic Documents](../../generated_pages/techniques/T0089.001.md) | | [T0089.003 Alter Authentic Documents](../../generated_pages/techniques/T0089.003.md) | -| [T0146 Account](../../generated_pages/techniques/T0146.md) | -| [T0146.001 Free Account](../../generated_pages/techniques/T0146.001.md) | -| [T0146.002 Paid Account](../../generated_pages/techniques/T0146.002.md) | -| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) | -| [T0146.004 Administrator Account](../../generated_pages/techniques/T0146.004.md) | +| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | +| [T0146.001 Free Account Asset](../../generated_pages/techniques/T0146.001.md) | +| [T0146.002 Paid Account Asset](../../generated_pages/techniques/T0146.002.md) | +| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) | +| [T0146.004 Administrator Account Asset](../../generated_pages/techniques/T0146.004.md) | | [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) | | [T0146.006 Open Access Platform](../../generated_pages/techniques/T0146.006.md) | -| [T0146.007 Automated Account](../../generated_pages/techniques/T0146.007.md) | -| [T0147 Software](../../generated_pages/techniques/T0147.md) | -| [T0147.001 Game](../../generated_pages/techniques/T0147.001.md) | -| [T0147.002 Game Mod](../../generated_pages/techniques/T0147.002.md) | -| [T0147.003 Malware](../../generated_pages/techniques/T0147.003.md) | -| [T0147.004 Mobile App](../../generated_pages/techniques/T0147.004.md) | +| [T0146.007 Automated Account Asset](../../generated_pages/techniques/T0146.007.md) | +| [T0147 Software Asset](../../generated_pages/techniques/T0147.md) | +| [T0147.001 Game Asset](../../generated_pages/techniques/T0147.001.md) | +| [T0147.002 Game Mod Asset](../../generated_pages/techniques/T0147.002.md) | +| [T0147.003 Malware Asset](../../generated_pages/techniques/T0147.003.md) | +| [T0147.004 Mobile App Asset](../../generated_pages/techniques/T0147.004.md) | | [T0148 Financial Instrument](../../generated_pages/techniques/T0148.md) | | [T0148.001 Online Banking Platform](../../generated_pages/techniques/T0148.001.md) | -| [T0148.002 Bank Account](../../generated_pages/techniques/T0148.002.md) | +| [T0148.002 Bank Account Asset](../../generated_pages/techniques/T0148.002.md) | | [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) | | [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) | | [T0148.005 Subscription Processing Capability](../../generated_pages/techniques/T0148.005.md) | @@ -73,24 +73,24 @@ | [T0148.008 Cryptocurrency Exchange Platform](../../generated_pages/techniques/T0148.008.md) | | [T0148.009 Cryptocurrency Wallet](../../generated_pages/techniques/T0148.009.md) | | [T0149 Online Infrastructure](../../generated_pages/techniques/T0149.md) | -| [T0149.001 Domain](../../generated_pages/techniques/T0149.001.md) | -| [T0149.002 Email Domain](../../generated_pages/techniques/T0149.002.md) |
+| [T0149.001 Domain Asset](../../generated_pages/techniques/T0149.001.md) | +| [T0149.002 Email Domain Asset](../../generated_pages/techniques/T0149.002.md) | | [T0149.003 Lookalike Domain](../../generated_pages/techniques/T0149.003.md) | -| [T0149.004 Redirecting Domain](../../generated_pages/techniques/T0149.004.md) | -| [T0149.005 Server](../../generated_pages/techniques/T0149.005.md) | -| [T0149.006 IP Address](../../generated_pages/techniques/T0149.006.md) | -| [T0149.007 VPN](../../generated_pages/techniques/T0149.007.md) | -| [T0149.008 Proxy IP Address](../../generated_pages/techniques/T0149.008.md) | +| [T0149.004 Redirecting Domain Asset](../../generated_pages/techniques/T0149.004.md) | +| [T0149.005 Server Asset](../../generated_pages/techniques/T0149.005.md) | +| [T0149.006 IP Address Asset](../../generated_pages/techniques/T0149.006.md) | +| [T0149.007 VPN Asset](../../generated_pages/techniques/T0149.007.md) | +| [T0149.008 Proxy IP Address Asset](../../generated_pages/techniques/T0149.008.md) | | [T0149.009 Internet Connected Physical Asset](../../generated_pages/techniques/T0149.009.md) | | [T0150 Asset Origin](../../generated_pages/techniques/T0150.md) | -| [T0150.001 Newly Created](../../generated_pages/techniques/T0150.001.md) | -| [T0150.002 Dormant](../../generated_pages/techniques/T0150.002.md) | -| [T0150.003 Pre-Existing](../../generated_pages/techniques/T0150.003.md) | -| [T0150.004 Repurposed](../../generated_pages/techniques/T0150.004.md) | -| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) | -| [T0150.006 Purchased](../../generated_pages/techniques/T0150.006.md) | -| [T0150.007 Rented](../../generated_pages/techniques/T0150.007.md) | -| [T0150.008 Bulk Created](../../generated_pages/techniques/T0150.008.md) | +| [T0150.001 Newly Created Asset](../../generated_pages/techniques/T0150.001.md) | +| [T0150.002 Dormant Asset](../../generated_pages/techniques/T0150.002.md) | +| [T0150.003 Pre-Existing Asset](../../generated_pages/techniques/T0150.003.md) | +| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) | +| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | +| [T0150.006 Purchased Asset](../../generated_pages/techniques/T0150.006.md) | +| [T0150.007 Rented Asset](../../generated_pages/techniques/T0150.007.md) | +| [T0150.008 Bulk Created Asset](../../generated_pages/techniques/T0150.008.md) | diff --git a/generated_pages/tactics/TA07.md index a1d7eb7..88b4a19 100644 --- a/generated_pages/tactics/TA07.md +++ b/generated_pages/tactics/TA07.md @@ -42,9 +42,9 @@ | [T0151.017 Dating Platform](../../generated_pages/techniques/T0151.017.md) | | [T0152 Digital Content Hosting Asset](../../generated_pages/techniques/T0152.md) | | [T0152.001 Blogging Platform](../../generated_pages/techniques/T0152.001.md) | -| [T0152.002 Blog](../../generated_pages/techniques/T0152.002.md) | +| [T0152.002 Blog Asset](../../generated_pages/techniques/T0152.002.md) | | [T0152.003 Website Hosting Platform](../../generated_pages/techniques/T0152.003.md) | -| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) | +| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) | | [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) | | [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) | | [T0152.007 Audio Platform](../../generated_pages/techniques/T0152.007.md) |
@@ -56,8 +56,8 @@ | [T0153 Digital Content Delivery Asset](../../generated_pages/techniques/T0153.md) | | [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) | | [T0153.002 Link Shortening Platform](../../generated_pages/techniques/T0153.002.md) | -| [T0153.003 Shortened Link](../../generated_pages/techniques/T0153.003.md) | -| [T0153.004 QR Code](../../generated_pages/techniques/T0153.004.md) | +| [T0153.003 Shortened Link Asset](../../generated_pages/techniques/T0153.003.md) | +| [T0153.004 QR Code Asset](../../generated_pages/techniques/T0153.004.md) | | [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) | | [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) | | [T0153.007 Direct Messaging](../../generated_pages/techniques/T0153.007.md) | @@ -65,12 +65,12 @@ | [T0154.001 AI LLM Platform](../../generated_pages/techniques/T0154.001.md) | | [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) | | [T0155 Gated Asset](../../generated_pages/techniques/T0155.md) | -| [T0155.001 Password Gated](../../generated_pages/techniques/T0155.001.md) | -| [T0155.002 Invite Gated](../../generated_pages/techniques/T0155.002.md) | -| [T0155.003 Approval Gated](../../generated_pages/techniques/T0155.003.md) | -| [T0155.004 Geoblocked](../../generated_pages/techniques/T0155.004.md) | -| [T0155.005 Paid Access](../../generated_pages/techniques/T0155.005.md) | -| [T0155.006 Subscription Access](../../generated_pages/techniques/T0155.006.md) | +| [T0155.001 Password Gated Asset](../../generated_pages/techniques/T0155.001.md) | +| [T0155.002 Invite Gated Asset](../../generated_pages/techniques/T0155.002.md) | +| [T0155.003 Approval Gated Asset](../../generated_pages/techniques/T0155.003.md) | +| [T0155.004 Geoblocked Asset](../../generated_pages/techniques/T0155.004.md) | +| [T0155.005 Paid Access Asset](../../generated_pages/techniques/T0155.005.md) | +| [T0155.006 Subscription Access Asset](../../generated_pages/techniques/T0155.006.md) | | [T0155.007 Encrypted Communication Channel](../../generated_pages/techniques/T0155.007.md) | diff --git a/generated_pages/techniques/T0002.md index ba0220c..4a323c9 100644 --- a/generated_pages/techniques/T0002.md +++ b/generated_pages/techniques/T0002.md @@ -7,8 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | facilitate state propaganda and defuse crises | -| [I00034 DibaFacebookExpedition](../../generated_pages/incidents/I00034.md) | Netizens from one of the largest discussion forums in China, known as Diba, coordinated to overcome China’s Great Firewall | diff --git a/generated_pages/techniques/T0010.md index 9dd47de..6aef7db 100644 --- a/generated_pages/techniques/T0010.md +++ b/generated_pages/techniques/T0010.md @@ -7,17 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | cultivate, manipulate, exploit useful idiots | -| [I00007 Incirlik terrorists](../../generated_pages/incidents/I00007.md) | cultivate, manipulate, exploit useful idiots (in the case Paul Manafort) | -| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | cultivate, manipulate, exploit useful idiots
(Alex Jones... drives conspiracy theories; false flags, crisis actors) | -| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | cultivate, manipulate, exploit useful idiots | -| [I00029 MH17 investigation](../../generated_pages/incidents/I00029.md) | cultivate, manipulate, exploit useful idiots | -| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) | -| [I00044 JadeHelm exercise](../../generated_pages/incidents/I00044.md) | cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) | -| [I00049 White Helmets: Chemical Weapons](../../generated_pages/incidents/I00049.md) | cultivate, manipulate, exploit useful idiots (Roger Waters; Venessa Beeley...) | -| [I00050 #HandsOffVenezuela](../../generated_pages/incidents/I00050.md) | cultivate, manipulate, exploit useful idiots (Roger Waters) | -| [I00051 Integrity Initiative](../../generated_pages/incidents/I00051.md) | cultivate, manipulate, exploit useful idiots | -| [I00063 Olympic Doping Scandal](../../generated_pages/incidents/I00063.md) | cultivate, manipulate, exploit useful idiots | diff --git a/generated_pages/techniques/T0015.md b/generated_pages/techniques/T0015.md index 3a9322e..e1ff536 100644 --- a/generated_pages/techniques/T0015.md +++ b/generated_pages/techniques/T0015.md @@ -7,7 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00006 Columbian Chemicals](../../generated_pages/incidents/I00006.md) | Create and use hashtag | | [I00086 #WeAreNotSafe – Exposing How a Post-October 7th Disinformation Network Operates on Israeli Social Media](../../generated_pages/incidents/I00086.md) | In this report accounts were identified as part of “a sophisticated and extensive coordinated network orchestrating a disinformation campaign targeting Israeli digital spaces since October 7th, 2023”, which posted hashtags alongside campaign content (T0015: Create Hashtags and Search Artefacts):

“The accounts post generic images to fill their account feed to make the account seem real. They then employ a hidden hashtag in their posts, consisting of a seemingly random string of numbers and letters.

“The hypothesis regarding this tactic is that the group orchestrating these accounts utilizes these hashtags as a means of indexing them. This system likely serves a dual purpose: firstly, to keep track of the network’s expansive network of accounts and unique posts, and secondly, to streamline the process of boosting engagement among these accounts. By searching for these specific, unique hashtags, the group can quickly locate posts from their network and engage with them using other fake accounts, thereby artificially inflating the visibility and perceived authenticity of the fake account.”
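A minimal sketch of how this indexing behaviour could be surfaced in collected post data, assuming a simple list of (account_id, hashtag) pairs: flag hashtags which look like random alphanumeric strings and which are shared by more than one account. The input shape, length cut-off, and entropy threshold below are illustrative assumptions, not details taken from the report.

```python
import math
import re
from collections import defaultdict

def looks_random(tag: str) -> bool:
    """Heuristic for a 'seemingly random string of numbers and letters':
    alphanumeric, reasonably long, mixes digits with letters, high entropy."""
    if len(tag) < 8 or not re.fullmatch(r"[A-Za-z0-9]+", tag):
        return False
    if not (re.search(r"\d", tag) and re.search(r"[A-Za-z]", tag)):
        return False
    probs = [tag.count(c) / len(tag) for c in set(tag)]
    return -sum(p * math.log2(p) for p in probs) >= 2.5  # assumed threshold

def candidate_index_tags(posts):
    """posts: iterable of (account_id, hashtag) pairs (assumed input shape).
    Returns random-looking hashtags used by more than one account."""
    accounts_by_tag = defaultdict(set)
    for account_id, hashtag in posts:
        if looks_random(hashtag.lstrip("#")):
            accounts_by_tag[hashtag].add(account_id)
    return {tag: ids for tag, ids in accounts_by_tag.items() if len(ids) > 1}
```

Any hashtag this returns is only a candidate; manual review of the accounts sharing it would still be needed before treating it as evidence of a coordinated network.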
| diff --git a/generated_pages/techniques/T0016.md b/generated_pages/techniques/T0016.md index 0d62716..39a54fa 100644 --- a/generated_pages/techniques/T0016.md +++ b/generated_pages/techniques/T0016.md @@ -7,7 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | Click-bait (economic actors) fake news sites (ie: Denver Guardian; Macedonian teens) | | [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | “On January 4 [2017], however, the Donbas News International (DNI) agency, based in Donetsk, Ukraine, and (since September 2016) an official state media outlet of the unrecognized separatist Donetsk People’s Republic, ran an article under the sensational headline, “US sends 3,600 tanks against Russia — massive NATO deployment under way.” DNI is run by Finnish exile Janus Putkonen, described by the Finnish national broadcaster, YLE, as a “Finnish info warrior”, and the first foreigner to be granted a Donetsk passport.

“The equally sensational opening paragraph ran, “The NATO war preparation against Russia, ‘Operation Atlantic Resolve’, is in full swing. 2,000 US tanks will be sent in coming days from Germany to Eastern Europe, and 1,600 US tanks is deployed to storage facilities in the Netherlands. At the same time, NATO countries are sending thousands of soldiers in to Russian borders.”

“The report is based around an obvious factual error, conflating the total number of vehicles with the actual number of tanks, and therefore multiplying the actual tank force 20 times over. For context, military website globalfirepower.com puts the total US tank force at 8,848. If the DNI story had been true, it would have meant sending 40% of all the US’ main battle tanks to Europe in one go.

“Could this have been an innocent mistake? The simple answer is “no”. The journalist who penned the story had a sufficient command of the details to be able to write, later in the same article, “In January, 26 tanks, 100 other vehicles and 120 containers will be transported by train to Lithuania. Germany will send the 122nd Infantry Battalion.” Yet the same author apparently believed, in the headline and first paragraph, that every single vehicle in Atlantic Resolve is a tank. To call this an innocent mistake is simply not plausible.

“The DNI story can only realistically be considered a deliberate fake designed to caricaturize and demonize NATO, the United States and Germany (tactfully referred to in the report as having “rolled over Eastern Europe in its war of extermination 75 years ago”) by grossly overstating the number of MBTs involved.”


This behaviour matches T0016: Create Clickbait because the author is shown, later in the same article, to know that most of the vehicles were not tanks, yet still gave the piece a sensationalist headline claiming that every vehicle being sent was a tank. | diff --git a/generated_pages/techniques/T0017.md index aeb0ec9..dc9c87b 100644 --- a/generated_pages/techniques/T0017.md +++ b/generated_pages/techniques/T0017.md @@ -7,8 +7,7 @@ | Incident | Descriptions given for this incident | | --------- | ------------------------- | -| [I00002 #VaccinateUS](../../generated_pages/incidents/I00002.md) | Promote "funding" campaign | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU DisinfoLab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | diff --git a/generated_pages/techniques/T0018.md b/generated_pages/techniques/T0018.md index 25784ea..0310669 100644 --- a/generated_pages/techniques/T0018.md +++ b/generated_pages/techniques/T0018.md @@ -7,9 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00002 #VaccinateUS](../../generated_pages/incidents/I00002.md) | buy FB targeted ads | -| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | Targeted FB paid ads | -| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | Targeted FB paid ads | | [I00097 Report: Not Just Algorithms](../../generated_pages/incidents/I00097.md) | This report explores the role of four systems (recommender systems, content moderation systems, ad approval systems and ad management systems) in creating risks around eating disorders.

[...]

Ad approval systems can create risks. We created 12 ‘fake’ ads that promoted dangerous weight loss techniques and behaviours. We tested to see if these ads would be approved to run, and they were. This means dangerous behaviours can be promoted in paid-for advertising. (Requests to run ads were withdrawn after approval or rejection, so no dangerous advertising was published as a result of this experiment.)

Specifically: On TikTok, 100% of the ads were approved to run; On Facebook, 83% of the ads were approved to run; On Google, 75% of the ads were approved to run.

Ad management systems can create risks. We investigated how platforms allow advertisers to target users, and found that it is possible to target people who may be interested in pro-eating disorder content.

Specifically: On TikTok: End-users who interact with pro-eating disorder content on TikTok, download advertisers’ eating disorder apps or visit their websites can be targeted; On Meta: End-users who interact with pro-eating disorder content on Meta, download advertisers’ eating disorder apps or visit their websites can be targeted; On X: End-users who follow pro-eating disorder accounts, or ‘look’ like them, can be targeted; On Google: End-users who search specific words or combinations of words (including pro-eating disorder words), watch pro-eating disorder YouTube channels and probably those who download eating disorder and mental health apps can be targeted.
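As a quick sanity check on the rates above: with 12 test ads, the quoted percentages imply the following approval counts (the counts are an inference from the percentages; the report itself gives only the rates).

```python
# Implied approval counts out of the 12 test ads, inferred from quoted rates
ads = 12
for platform, rate in [("TikTok", 1.00), ("Facebook", 0.83), ("Google", 0.75)]:
    print(f"{platform}: ~{round(rate * ads)} of {ads} ads approved")
# TikTok: ~12 of 12; Facebook: ~10 of 12; Google: ~9 of 12
```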


Advertising platforms managed by TikTok, Facebook, and Google approved adverts to be displayed on their platforms. These platforms enabled users to deliver targeted advertising to potentially vulnerable platform users (T0018: Purchase Targeted Advertisements, T0153.005: Online Advertising Platform). | diff --git a/generated_pages/techniques/T0020.md b/generated_pages/techniques/T0020.md index 895f401..2608224 100644 --- a/generated_pages/techniques/T0020.md +++ b/generated_pages/techniques/T0020.md @@ -7,10 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | 4Chan/8Chan - trial content | -| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | 4Chan/8Chan - trial content | -| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | 4Chan/8Chan - trial content | -| [I00044 JadeHelm exercise](../../generated_pages/incidents/I00044.md) | 4Chan/8Chan - trial content | diff --git a/generated_pages/techniques/T0022.md b/generated_pages/techniques/T0022.md index c604c98..c1101a5 100644 --- a/generated_pages/techniques/T0022.md +++ b/generated_pages/techniques/T0022.md @@ -7,7 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00056 Iran Influence Operations](../../generated_pages/incidents/I00056.md) | Memes... anti-Isreal/USA/West, conspiracy narratives | diff --git a/generated_pages/techniques/T0023.md b/generated_pages/techniques/T0023.md index b2c20c5..8d4e11d 100644 --- a/generated_pages/techniques/T0023.md +++ b/generated_pages/techniques/T0023.md @@ -7,8 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00047 Sea of Azov](../../generated_pages/incidents/I00047.md) | (Distort) Kremlin-controlled RT cited Russian Minister of Foreign Affairs Sergei Lavrov suggesting that Ukraine deliberately provoked Russia in hopes of gaining additional support from the United States and Europe. | -| [I00053 China Huawei CFO Arrest](../../generated_pages/incidents/I00053.md) | Distorted, saccharine “news” about the Chinese State and Party | | [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | “On January 4 [2017], a little-known news site based in Donetsk, Ukraine published an article claiming that the United States was sending 3,600 tanks to Europe as part of “the NATO war preparation against Russia”.

“Like much fake news, this story started with a grain of truth: the US was about to reinforce its armored units in Europe. However, the article converted literally thousands of other vehicles — including hundreds of Humvees and trailers — into tanks, building the US force into something 20 times more powerful than it actually was.

“The story caught on online. Within three days it had been repeated by a dozen websites in the United States, Canada and Europe, and shared some 40,000 times. It was translated into Norwegian; quoted, unchallenged, by Russian state news agency RIA Novosti; and spread among Russian-language websites.

“It was also an obvious fake, as any Google news search would have revealed. Yet despite its evident falsehood, it spread widely, and not just in directly Kremlin-run media. Tracking the spread of this fake therefore shines a light on the wider question of how fake stories are dispersed.”


Russian state news agency RIA Novosti presents itself as a news outlet (T0097.202: News Outlet Persona). RIA Novosti is a real news outlet (T0143.001: Authentic Persona), but it did not carry out the basic investigation into the veracity of the narrative it published that is implicitly expected of institutions presenting themselves as news outlets.

We can’t know how or why this narrative ended up being published by RIA Novosti, but we know that it presented a distorted reality as authentic information (T0023: Distort Facts), claiming that the US was sending 3,600 tanks, instead of 3,600 vehicles which included ~180 tanks. | diff --git a/generated_pages/techniques/T0029.md b/generated_pages/techniques/T0029.md index da6499c..a43c0d2 100644 --- a/generated_pages/techniques/T0029.md +++ b/generated_pages/techniques/T0029.md @@ -7,8 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | manipulate social media "online polls"? | -| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | manipulate social media "online polls"? | diff --git a/generated_pages/techniques/T0039.md b/generated_pages/techniques/T0039.md index bebaf12..c4dc269 100644 --- a/generated_pages/techniques/T0039.md +++ b/generated_pages/techniques/T0039.md @@ -7,9 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00006 Columbian Chemicals](../../generated_pages/incidents/I00006.md) | bait journalists/media/politicians | -| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | journalist/media baiting | -| [I00015 ConcordDiscovery](../../generated_pages/incidents/I00015.md) | journalist/media baiting | diff --git a/generated_pages/techniques/T0040.md b/generated_pages/techniques/T0040.md index df5588c..ab102f3 100644 --- a/generated_pages/techniques/T0040.md +++ b/generated_pages/techniques/T0040.md @@ -7,8 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00029 MH17 investigation](../../generated_pages/incidents/I00029.md) | Demand insurmountable proof | -| [I00047 Sea of Azov](../../generated_pages/incidents/I00047.md) | Demand insurmountable proof | diff --git a/generated_pages/techniques/T0044.md b/generated_pages/techniques/T0044.md index 149918c..b72716d 100644 --- a/generated_pages/techniques/T0044.md +++ b/generated_pages/techniques/T0044.md @@ -7,7 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00015 ConcordDiscovery](../../generated_pages/incidents/I00015.md) | Circulate to media via DM, then release publicly | diff --git a/generated_pages/techniques/T0045.md b/generated_pages/techniques/T0045.md index a6ee98e..8e72dd6 100644 --- a/generated_pages/techniques/T0045.md +++ b/generated_pages/techniques/T0045.md @@ -7,7 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00009 PhilippinesExpert](../../generated_pages/incidents/I00009.md) | Using "expert" | diff --git a/generated_pages/techniques/T0046.md b/generated_pages/techniques/T0046.md index ab36822..bb4f62b 100644 --- a/generated_pages/techniques/T0046.md +++ b/generated_pages/techniques/T0046.md @@ -7,18 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00002 #VaccinateUS](../../generated_pages/incidents/I00002.md) | SEO optimisation/manipulation ("key words") | -| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | SEO optimisation/manipulation ("key words") | -| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | SEO optimisation/manipulation ("key words") | -| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | SEO optimisation/manipulation ("key 
words") | -| [I00029 MH17 investigation](../../generated_pages/incidents/I00029.md) | SEO optimisation/manipulation ("key words") | -| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | SEO optimisation/manipulation ("key words") | -| [I00044 JadeHelm exercise](../../generated_pages/incidents/I00044.md) | SEO optimisation/manipulation ("key words") | -| [I00049 White Helmets: Chemical Weapons](../../generated_pages/incidents/I00049.md) | SEO optimisation/manipulation ("key words") | -| [I00050 #HandsOffVenezuela](../../generated_pages/incidents/I00050.md) | SEO optimisation/manipulation ("key words") | -| [I00051 Integrity Initiative](../../generated_pages/incidents/I00051.md) | SEO optimisation/manipulation ("key words") | -| [I00056 Iran Influence Operations](../../generated_pages/incidents/I00056.md) | SEO optimisation/manipulation ("key words") | -| [I00063 Olympic Doping Scandal](../../generated_pages/incidents/I00063.md) | SEO optimisation/manipulation ("key words") | diff --git a/generated_pages/techniques/T0047.md b/generated_pages/techniques/T0047.md index f87aeef..b1399d5 100644 --- a/generated_pages/techniques/T0047.md +++ b/generated_pages/techniques/T0047.md @@ -7,7 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | cow online opinion leaders into submission, muzzling social media as a political force | diff --git a/generated_pages/techniques/T0048.md b/generated_pages/techniques/T0048.md index 6014cfb..2502a1f 100644 --- a/generated_pages/techniques/T0048.md +++ b/generated_pages/techniques/T0048.md @@ -7,7 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | cow online opinion leaders into submission, muzzling social media as a political force | | [I00122 The Extreme Right on Discord](../../generated_pages/incidents/I00122.md) | Discord is an example of a T0151.004: Chat Platform, which allows users to create their own T0151.005: Chat Community Server. The Institute for Strategic Dialog (ISD) conducted an investigation into the extreme right’s usage of Discord servers:

Discord is a free service accessible via phones and computers. It allows users to talk to each other in real time via voice, text or video chat and emerged in 2015 as a platform designed to assist gamers in communicating with each other while playing video games. The popularity of the platform has surged in recent years, and it is currently estimated to have 140 million monthly active users.

Chatrooms – known as servers – in the platform can be created by anyone, and they are used for a range of purposes that extend far beyond gaming. Such purposes include the discussion of extreme right-wing ideologies and the planning of offline extremist activity. Ahead of the far-right Unite the Right rally in Charlottesville, Virginia, in August 2017, organisers used Discord to plan and promote events and posted swastikas and praised Hitler in chat rooms with names like “National Socialist Army” and “Führer’s Gas Chamber”.


In this example, a Discord server was used to organise the 2017 Charlottesville Unite the Right rally. Chat rooms in the server were used to discuss different topics related to the rally (T0057: Organise Events, T0126.002: Facilitate Logistics or Support for Attendance, T0151.004: Chat Platform, T0151.005: Chat Community Server, T0151.006: Chat Room).

Another primary activity engaged in the servers analysed are raids against other servers associated with political opponents, and in particular those that appear to be pro-LGBTQ. Raids are a phenomenon in which a small group of users will join a Discord server with the sole purpose of spamming the host with offensive or incendiary messages and content with the aim of upsetting local users or having the host server banned by Discord. On two servers examined here, raiding was their primary function.

Among servers devoted to this activity, specific channels were often created to host links to servers that users were then encouraged to raid. Users are encouraged to be as offensive as possible with the aim of upsetting or angering users on the raided server, and channels often had content banks of offensive memes and content to be shared on raided servers.

The use of raids demonstrates the gamified nature of extremist activity on Discord, where use of the platform and harassment of political opponents is itself turned into a type of real-life video game designed to strengthen in-group affiliation. This combined with the broader extremist activity identified in these channels suggests that the combative activity of raiding could provide a pathway for younger people to become more engaged with extremist activity.


Discord servers were used by members of the extreme right to coordinate harassment of targeted communities (T0048: Harass, T0049.005: Conduct Swarming, T0151.004: Chat Platform, T0151.005: Chat Community Server). | | [I00123 The Extreme Right on Steam](../../generated_pages/incidents/I00123.md) | ISD conducted an investigation into the usage of social groups on Steam. Steam is an online platform used to buy and sell digital games, and includes the Steam community feature, which “allows users to find friends and join groups and discussion forums, while also offering in-game voice and text chat”. Actors have used Steam’s social capabilities to enable online harm campaigns:

One function of these Steam groups is the organisation of ‘raids’ – coordinated trolling activity against their political opponents. An example of this can be seen in a white power music group sharing a link to an Israeli Steam group, encouraging other members to “help me raid this juden [German word for Jew] group”. The comments section of said target group show that neo-Nazi and antisemitic comments were consistently posted in the group just two minutes after the instruction had been posted in the extremist group, highlighting the swiftness with which racially motivated harassment can be directed online.

Threat actors used social groups on Steam to organise harassment of targets (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0049.005: Conduct Swarming, T0048: Harass). | diff --git a/generated_pages/techniques/T0049.md b/generated_pages/techniques/T0049.md index 5c457b5..3d432fe 100644 --- a/generated_pages/techniques/T0049.md +++ b/generated_pages/techniques/T0049.md @@ -7,8 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | 2,000,000 people (est.) part of state run/sponsored astroturfing | -| [I00034 DibaFacebookExpedition](../../generated_pages/incidents/I00034.md) | flood the Facebook pages of Taiwanese politicians and news agencies with a pro-PRC message, Democratic Progressive Party (DPP), attracted nearly 40,000 Facebook comments in just eight hours. | diff --git a/generated_pages/techniques/T0057.md b/generated_pages/techniques/T0057.md index 52cced7..d9aeb9f 100644 --- a/generated_pages/techniques/T0057.md +++ b/generated_pages/techniques/T0057.md @@ -7,10 +7,6 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | Digital to physical "organize+promote" rallies & events? | -| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | Digital to physical "organize+promote" rallies & events | -| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | Digital to physical "organize+promote" rallies & events? | -| [I00053 China Huawei CFO Arrest](../../generated_pages/incidents/I00053.md) | Events coordinated and promoted across media platforms, Extend digital the physical space… gatherings ie: support for Meng outside courthouse | | [I00122 The Extreme Right on Discord](../../generated_pages/incidents/I00122.md) | Discord is an example of a T0151.004: Chat Platform, which allows users to create their own T0151.005: Chat Community Server. The Institute for Strategic Dialogue (ISD) conducted an investigation into the extreme right’s usage of Discord servers:

Discord is a free service accessible via phones and computers. It allows users to talk to each other in real time via voice, text or video chat and emerged in 2015 as a platform designed to assist gamers in communicating with each other while playing video games. The popularity of the platform has surged in recent years, and it is currently estimated to have 140 million monthly active users.

Chatrooms – known as servers – in the platform can be created by anyone, and they are used for a range of purposes that extend far beyond gaming. Such purposes include the discussion of extreme right-wing ideologies and the planning of offline extremist activity. Ahead of the far-right Unite the Right rally in Charlottesville, Virginia, in August 2017, organisers used Discord to plan and promote events and posted swastikas and praised Hitler in chat rooms with names like “National Socialist Army” and “Führer’s Gas Chamber”.


In this example, a Discord server was used to organise the 2017 Charlottesville Unite the Right rally. Chat rooms in the server were used to discuss different topics related to the rally (T0057: Organise Events, T0126.002: Facilitate Logistics or Support for Attendance, T0151.004: Chat Platform, T0151.005: Chat Community Server, T0151.006: Chat Room).

Another primary activity engaged in the servers analysed are raids against other servers associated with political opponents, and in particular those that appear to be pro-LGBTQ. Raids are a phenomenon in which a small group of users will join a Discord server with the sole purpose of spamming the host with offensive or incendiary messages and content with the aim of upsetting local users or having the host server banned by Discord. On two servers examined here, raiding was their primary function.

Among servers devoted to this activity, specific channels were often created to host links to servers that users were then encouraged to raid. Users are encouraged to be as offensive as possible with the aim of upsetting or angering users on the raided server, and channels often had content banks of offensive memes and content to be shared on raided servers.

The use of raids demonstrates the gamified nature of extremist activity on Discord, where use of the platform and harassment of political opponents is itself turned into a type of real-life video game designed to strengthen in-group affiliation. This combined with the broader extremist activity identified in these channels suggests that the combative activity of raiding could provide a pathway for younger people to become more engaged with extremist activity.


Discord servers were used by members of the extreme right to coordinate harassment of targeted communities (T0048: Harass, T0049.005: Conduct Swarming, T0151.004: Chat Platform, T0151.005: Chat Community Server). | diff --git a/generated_pages/techniques/T0061.md b/generated_pages/techniques/T0061.md index 195e54d..b00e5fa 100644 --- a/generated_pages/techniques/T0061.md +++ b/generated_pages/techniques/T0061.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).
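The shared-AdSense-tag attribution described in the quote above is mechanically simple to reproduce. A minimal sketch of the technique, using the three domains named in the report (the sites may no longer resolve, and investigators would more likely work from archived page copies and dedicated OSINT tooling than live fetches): fetch each page, extract the ca-pub- publisher ID that AdSense embeds, and group domains by shared ID.

```python
import re
import urllib.request
from collections import defaultdict

# Domains named in the report; listed here purely for illustration.
CANDIDATES = ["suavelos.eu", "alabastro.eu", "arpac.eu"]

# Google AdSense embeds a publisher ID of the form ca-pub-<digits>.
ADSENSE_ID = re.compile(r"ca-pub-\d{6,}")

by_publisher = defaultdict(set)
for domain in CANDIDATES:
    try:
        with urllib.request.urlopen(f"http://{domain}", timeout=10) as resp:
            html = resp.read().decode("utf-8", "replace")
    except OSError:
        continue  # unreachable or stale domain; skip
    for pub_id in ADSENSE_ID.findall(html):
        by_publisher[pub_id].add(domain)

# Domains sharing a publisher ID are plausibly run by the same actor.
for pub_id, domains in by_publisher.items():
    if len(domains) > 1:
        print(pub_id, "->", sorted(domains))
```

A shared publisher ID is a strong signal of common control, since ad revenue for that ID accrues to a single AdSense account, though it is not conclusive on its own.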

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.
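The shared-IP observation lends itself to a similar sketch. Forward DNS resolution only confirms co-hosting for domains you already suspect (reverse-IP or passive-DNS datasets are what surface the "20 other domains" in the first place), but grouping candidate domains by resolved IP is enough to reproduce the inference drawn here:

```python
import socket
from collections import defaultdict

# Domains named in the report; they may no longer resolve today.
CANDIDATES = ["suavelos.eu", "alabastro.eu", "arpac.eu"]

by_ip = defaultdict(set)
for domain in CANDIDATES:
    try:
        by_ip[socket.gethostbyname(domain)].add(domain)
    except socket.gaierror:
        continue  # stale domain; skip

# At the time of the report these sat behind 94.23.253.173. A small
# tenant count on one IP is consistent with a single private server;
# shared hosting with thousands of tenants would weaken that inference.
for ip, domains in sorted(by_ip.items()):
    print(ip, "->", sorted(domains))
```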


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | diff --git a/generated_pages/techniques/T0068.md b/generated_pages/techniques/T0068.md index 7881c60..e843851 100644 --- a/generated_pages/techniques/T0068.md +++ b/generated_pages/techniques/T0068.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had previously posted AI-generated images of nature scenes, switched to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | +| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had previously posted AI-generated images of nature scenes, switched to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | diff --git a/generated_pages/techniques/T0084.004.md b/generated_pages/techniques/T0084.004.md index c4e1f85..fd23a6a 100644 --- a/generated_pages/techniques/T0084.004.md +++ b/generated_pages/techniques/T0084.004.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | diff --git a/generated_pages/techniques/T0085.004.md b/generated_pages/techniques/T0085.004.md index 5044395..5254e87 100644 --- a/generated_pages/techniques/T0085.004.md +++ b/generated_pages/techniques/T0085.004.md @@ -8,8 +8,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.

[...]

The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”


In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, and used it to impersonate a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.
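The sender address in this lure sat on a domain built to look like a real outlet's (tagged below as T0149.003: Lookalike Domain). That artefact can be screened for mechanically on the receiving side. A minimal defensive sketch, assuming a hypothetical allow-list of genuine outlet domains: flag any sender domain within a small edit distance of a known domain that is not an exact match.

```python
# Flag sender domains that nearly match, but do not equal, a known
# news-outlet domain: a simple screen for lookalike domains.
# The allow-list below is hypothetical.
KNOWN_OUTLETS = ["haaretz.com", "timesofisrael.com", "jpost.com"]

def edit_distance(a: str, b: str) -> int:
    """Classic Levenshtein distance via dynamic programming."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

def is_lookalike(sender_domain: str, max_distance: int = 2) -> bool:
    return any(
        0 < edit_distance(sender_domain, known) <= max_distance
        for known in KNOWN_OUTLETS
    )

print(is_lookalike("jpost.com"))   # False: exact match is the real domain
print(is_lookalike("jp0st.com"))   # True: one substitution away
```

In practice, mail filters would combine a check like this with homoglyph normalisation and domain-registration-age signals.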


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, and used it to impersonate a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | diff --git a/generated_pages/techniques/T0085.005.md b/generated_pages/techniques/T0085.005.md index 0eac38b..f135efd 100644 --- a/generated_pages/techniques/T0085.005.md +++ b/generated_pages/techniques/T0085.005.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | diff --git a/generated_pages/techniques/T0085.md b/generated_pages/techniques/T0085.md index b6823b5..bef3480 100644 --- a/generated_pages/techniques/T0085.md +++ b/generated_pages/techniques/T0085.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | +| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | diff --git a/generated_pages/techniques/T0086.002.md b/generated_pages/techniques/T0086.002.md index d0cac75..e10ef90 100644 --- a/generated_pages/techniques/T0086.002.md +++ b/generated_pages/techniques/T0086.002.md @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | +| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | | [I00100 Why ThisPersonDoesNotExist (and its copycats) need to be restricted](../../generated_pages/incidents/I00100.md) | You might have heard about the recent viral sensation, ThisPersonDoesNotExist.com, a website, launched two weeks ago, that uses Nvidia’s publicly available artificial intelligence technology to draw an invented, photo-realistic human being with each refresh. The tech is impressive and artistically evocative. It’s also irresponsible and needs to be restricted immediately.

[...]

Prior to this technology, scammers faced three major risks when using fake photos. Each of these risks had the potential to put them out business, or in jail.

Risk #1: Someone recognizes the photo. While the odds of this are long-shot, it does happen.

Risk #2: Someone reverse image searches the photo with a service like TinEye or Google Image Search and finds that it’s been posted elsewhere. Reverse image search is one of the top anti-fraud measures recommended by consumer protection advocates.

Risk #3: If the crime is successful, law enforcement uses the fake photo to figure out the scammer’s identity after the fact. Perhaps the scammer used an old classmate’s photo. Perhaps their personal account follows the Instagram member they pilfered. And so on: people make mistakes.

The problem with AI-generated photos is that they carry none of these risks. No one will recognize a human who’s never existed before. Google Image Search will return 0 results, possibly instilling a false sense of security in the searcher. And AI-generated photos don’t give law enforcement much to work with.


ThisPersonDoesNotExist is an online platform which, when visited, produces AI-generated images of people’s faces (T0146.006: Open Access Platform, T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). | -| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | +| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had previously posted AI-generated images, changed to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | diff --git a/generated_pages/techniques/T0087.001.md b/generated_pages/techniques/T0087.001.md index 5840254..d775f9a 100644 --- a/generated_pages/techniques/T0087.001.md +++ b/generated_pages/techniques/T0087.001.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | diff --git a/generated_pages/techniques/T0087.md index 0a52269..8a3e45b 100644 --- a/generated_pages/techniques/T0087.md +++ b/generated_pages/techniques/T0087.md @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which had previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals of Atlanta, a city over 500 miles from Louisiana and in a different time zone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | diff --git a/generated_pages/techniques/T0088.001.md b/generated_pages/techniques/T0088.001.md index 0598dc5..61d9d13 100644 --- a/generated_pages/techniques/T0088.001.md +++ b/generated_pages/techniques/T0088.001.md @@ -8,8 +8,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”

In this example attackers impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona), targeting one of its employees over WhatsApp (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: Develop AI-Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | diff --git a/generated_pages/techniques/T0088.md index 962e313..f2b93a6 100644 --- a/generated_pages/techniques/T0088.md +++ b/generated_pages/techniques/T0088.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | +| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | diff --git a/generated_pages/techniques/T0089.001.md b/generated_pages/techniques/T0089.001.md index a958d45..7663820 100644 --- a/generated_pages/techniques/T0089.001.md +++ b/generated_pages/techniques/T0089.001.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | diff --git a/generated_pages/techniques/T0089.md b/generated_pages/techniques/T0089.md index cb2d836..d2ff5f2 100644 --- a/generated_pages/techniques/T0089.md +++ b/generated_pages/techniques/T0089.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | diff --git a/generated_pages/techniques/T0092.md index b68d8a0..b5d875d 100644 --- a/generated_pages/techniques/T0092.md +++ b/generated_pages/techniques/T0092.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | diff --git a/generated_pages/techniques/T0097.100.md index db21916..d8153fd 100644 --- a/generated_pages/techniques/T0097.100.md +++ b/generated_pages/techniques/T0097.100.md @@ -9,7 +9,7 @@ | -------- | -------------------- | | [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”

In this example attackers impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona), targeting one of its employees over WhatsApp (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). | | [I00069 Uncharmed: Untangling Iran's APT42 Operations](../../generated_pages/incidents/I00069.md) | “[Iranian state-sponsored cyber espionage actor] APT42 cloud operations attack lifecycle can be described in details as follows:

- “Social engineering schemes involving decoys and trust building, which includes masquerading as legitimate NGOs and conducting ongoing correspondence with the target, sometimes lasting several weeks.
- The threat actor masqueraded as well-known international organizations in the legal and NGO fields and sent emails from domains typosquatting the original NGO domains, for example aspenlnstitute[.]org.
- The Aspen Institute became aware of this spoofed domain and collaborated with industry partners, including blocking it in SafeBrowsing, thus protecting users of Google Chrome and additional browsers.
- To increase their credibility, APT42 impersonated high-ranking personnel working at the aforementioned organizations when creating the email personas.
- APT42 enhanced their campaign credibility by using decoy material inviting targets to legitimate and relevant events and conferences. In one instance, the decoy material was hosted on an attacker-controlled SharePoint folder, accessible only after the victim entered their credentials. Mandiant did not identify malicious elements in the files, suggesting they were used solely to gain the victim’s trust.”
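The typosquatted domain quoted above, aspenlnstitute[.]org, swaps a lowercase ‘l’ for the ‘i’ of aspeninstitute.org — a single-character homoglyph substitution. A minimal sketch of how a defender might enumerate such lookalikes for a domain they own and screen inbound sender domains against them (the substitution table is illustrative, not exhaustive):

```python
# Common single-character lookalike substitutions (illustrative, not exhaustive)
HOMOGLYPHS = {"i": ["l", "1"], "l": ["i", "1"], "o": ["0"], "0": ["o"],
              "rn": ["m"], "m": ["rn"]}

def lookalikes(domain):
    """Enumerate single-substitution homoglyph variants of a domain name."""
    variants = set()
    for target, subs in HOMOGLYPHS.items():
        start = 0
        while (idx := domain.find(target, start)) != -1:
            for sub in subs:
                variants.add(domain[:idx] + sub + domain[idx + len(target):])
            start = idx + 1
    variants.discard(domain)
    return variants

# The variant Mandiant observed is among the generated lookalikes:
assert "aspenlnstitute.org" in lookalikes("aspeninstitute.org")
```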


In this example APT42, an Iranian state-sponsored cyber espionage actor, created a domain impersonating the existing NGO The Aspen Institute (T0143.003: Impersonated Persona, T0097.207: NGO Persona). They increased the perceived legitimacy of the impersonation by also impersonating high-ranking employees of the NGO (T0097.100: Individual Persona, T0143.003: Impersonated Persona). | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).
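Recipients of this kind of outreach can partially verify it from the message itself: receiving mail servers stamp SPF/DKIM/DMARC verdicts for the claimed sender domain (here aol.com) into the Authentication-Results header. A minimal sketch using Python’s standard library (the .eml filename is hypothetical):

```python
from email import policy
from email.parser import BytesParser

def auth_summary(eml_path):
    """Report the claimed From domain and any Authentication-Results verdicts."""
    with open(eml_path, "rb") as f:
        msg = BytesParser(policy=policy.default).parse(f)
    from_addr = msg.get("From", "")
    return {
        "from": from_addr,
        "from_domain": from_addr.rsplit("@", 1)[-1].rstrip(">"),
        # Look for spf=pass / dkim=pass verdicts matching the claimed domain
        "auth_results": msg.get_all("Authentication-Results") or [],
    }

# summary = auth_summary("robert_outreach.eml")
```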

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | diff --git a/generated_pages/techniques/T0097.101.md b/generated_pages/techniques/T0097.101.md index 7ac78be..876176b 100644 --- a/generated_pages/techniques/T0097.101.md +++ b/generated_pages/techniques/T0097.101.md @@ -7,13 +7,12 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | +| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | | [I00076 Network of Social Media Accounts Impersonates U.S. Political Candidates, Leverages U.S. and Israeli Media in Support of Iranian Interests](../../generated_pages/incidents/I00076.md) | “In addition to directly posting material on social media, we observed some personas in the network [of inauthentic accounts attributed to Iran] leverage legitimate print and online media outlets in the U.S. and Israel to promote Iranian interests via the submission of letters, guest columns, and blog posts that were then published. We also identified personas that we suspect were fabricated for the sole purpose of submitting such letters, but that do not appear to maintain accounts on social media. The personas claimed to be based in varying locations depending on the news outlets they were targeting for submission; for example, a persona that listed their location as Seattle, WA in a letter submitted to the Seattle Times subsequently claimed to be located in Baytown, TX in a letter submitted to The Baytown Sun. Other accounts in the network then posted links to some of these letters on social media.”

In this example actors fabricated individuals who lived in areas which were being targeted for influence through the use of letters to local papers (T0097.101: Local Persona, T0143.002: Fabricated Persona). | | [I00078 Meta’s September 2020 Removal of Coordinated Inauthentic Behavior](../../generated_pages/incidents/I00078.md) | “[Meta has] removed one Page, five Facebook accounts, one Group and three Instagram accounts for foreign or government interference which is coordinated inauthentic behavior on behalf of a foreign or government entity. This small network originated in Russia and focused primarily on Turkey and Europe, and also on the United States.

“This operation relied on fake accounts — some of which had been already detected and removed by our automated systems — to manage their Page and their Group, and to drive people to their site purporting to be an independent think-tank based primarily in Turkey. These accounts posed as locals based in Turkey, Canada and the US. They also recruited people to write for their website. This network had almost no following on our platforms when we removed it.”


Meta identified that a network of accounts originating in Russia were driving people off platform to a site which presented itself as a think-tank (T0097.204: Think Tank Persona). Meta did not make an attribution about the authenticity of this off-site think tank, so neither T0143.001: Authentic Persona nor T0143.002: Fabricated Persona is used here.

Meta had access to technical data for accounts on its platform, and asserted that they were fabricated individuals posing as locals who recruited targets to write content for their website (T0097.101: Local Persona, T0097.106: Recruiter Persona, T0143.002: Fabricated Persona). | | [I00081 Belarus KGB created fake accounts to criticize Poland during border crisis, Facebook parent company says](../../generated_pages/incidents/I00081.md) | “Meta said it also removed 31 Facebook accounts, four groups, two events and four Instagram accounts that it believes originated in Poland and targeted Belarus and Iraq. Those allegedly fake accounts posed as Middle Eastern migrants posting about the border crisis. Meta did not link the accounts to a specific group.

““These fake personas claimed to be sharing their own negative experiences of trying to get from Belarus to Poland and posted about migrants’ difficult lives in Europe,” Meta said. “They also posted about Poland’s strict anti-migrant policies and anti-migrant neo-Nazi activity in Poland. They also shared links to news articles criticizing the Belarusian government’s handling of the border crisis and off-platform videos alleging migrant abuse in Europe.””


In this example accounts falsely presented themselves as having local insight into the border crisis narrative (T0097.101: Local Persona, T0143.002: Fabricated Persona). | | [I00086 #WeAreNotSafe – Exposing How a Post-October 7th Disinformation Network Operates on Israeli Social Media](../../generated_pages/incidents/I00086.md) | Accounts which were identified as part of “a sophisticated and extensive coordinated network orchestrating a disinformation campaign targeting Israeli digital spaces since October 7th, 2023” were presenting themselves as locals to Israel (T0097.101: Local Persona):

“Unlike usual low-effort fake accounts, these accounts meticulously mimic young Israelis. They stand out due to the extraordinary lengths taken to ensure their authenticity, from unique narratives to the content they produce to their seemingly authentic interactions.” | | [I00087 Challenging Truth and Trust: A Global Inventory of Organized Social Media Manipulation](../../generated_pages/incidents/I00087.md) | “Another actor operating in China is the American-based company Devumi. Most of the Twitter accounts managed by Devumi resemble real people, and some are even associated with a kind of large-scale social identity theft. At least 55,000 of the accounts use the names, profile pictures, hometowns and other personal details of real Twitter users, including minors, according to The New York Times (Confessore et al., 2018)).”

In this example accounts impersonated real locals while spreading operation narratives (T0143.003: Impersonated Persona, T0097.101: Local Persona). The impersonation included stealing the legitimate accounts’ profile pictures (T0145.001: Copy Account Imagery). | -| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | diff --git a/generated_pages/techniques/T0097.102.md b/generated_pages/techniques/T0097.102.md index b451a9a..9813b0c 100644 --- a/generated_pages/techniques/T0097.102.md +++ b/generated_pages/techniques/T0097.102.md @@ -10,9 +10,9 @@ | [I00076 Network of Social Media Accounts Impersonates U.S. Political Candidates, Leverages U.S. and Israeli Media in Support of Iranian Interests](../../generated_pages/incidents/I00076.md) | “Accounts in the network [of inauthentic accounts attributed to Iran], under the guise of journalist personas, also solicited various individuals over Twitter for interviews and chats, including real journalists and politicians. The personas appear to have successfully conducted remote video and audio interviews with U.S. and UK-based individuals, including a prominent activist, a radio talk show host, and a former U.S. Government official, and subsequently posted the interviews on social media, showing only the individual being interviewed and not the interviewer. The interviewees expressed views that Iran would likely find favorable, discussing topics such as the February 2019 Warsaw summit, an attack on a military parade in the Iranian city of Ahvaz, and the killing of Jamal Khashoggi.

“The provenance of these interviews appear to have been misrepresented on at least one occasion, with one persona appearing to have falsely claimed to be operating on behalf of a mainstream news outlet; a remote video interview with a US-based activist about the Jamal Khashoggi killing was posted by an account adopting the persona of a journalist from the outlet Newsday, with the Newsday logo also appearing in the video. We did not identify any Newsday interview with the activist in question on this topic. In another instance, a persona posing as a journalist directed tweets containing audio of an interview conducted with a former U.S. Government official at real media personalities, calling on them to post about the interview.”


In this example actors fabricated journalists (T0097.102: Journalist Persona, T0143.002: Fabricated Persona) who worked at existing news outlets (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona) in order to conduct interviews with targeted individuals. | | [I00080 Hundreds Of Propaganda Accounts Targeting Iran And Qatar Have Been Removed From Facebook](../../generated_pages/incidents/I00080.md) | “One example of a fake reporter account targeting Americans is “Jenny Powell,” a self-described Washington-based journalist, volunteer, and environmental activist. At first glance, Powell’s Twitter timeline looks like it belongs to a young and eager reporter amplifying her interests. But her profile photo is a stock image, and many of her links go to the propaganda sites.

“Powell, who joined the platform just last month, shares links to stories from major US news media outlets, retweets local news about Washington, DC, and regularly promotes content from The Foreign Code and The Economy Club. Other fake journalist accounts behaved similarly to Powell and had generic descriptions. One of the accounts, for a fake Bruce Lopez in Louisiana, has a bio that describes him as a “Correspondent Traveler (noun) (linking verb) (noun/verb/adjective),” which appears to reveal the formula used to write Twitter bios for the accounts.”


The Jenny Powell account used in this influence operation presents as both a journalist and an activist (T0097.102: Journalist Persona, T0097.103: Activist Persona, T0143.002: Fabricated Persona). This example shows how threat actors can easily follow a template to present a fabricated persona to their target audience (T0144.002: Persona Template). |

“The core of this activity began in October 2021, with some accounts created as recently as mid-November. The people behind it used newly-created fake accounts — many of which were detected and disabled by our automated systems soon after creation — to pose as journalists and activists from the European Union, particularly Poland and Lithuania. Some of the accounts used profile photos likely generated using artificial intelligence techniques like generative adversarial networks (GAN). These fictitious personas posted criticism of Poland in English, Polish, and Kurdish, including pictures and videos about Polish border guards allegedly violating migrants’ rights, and compared Poland’s treatment of migrants against other countries’. They also posted to Groups focused on the welfare of migrants in Europe. A few accounts posted in Russian about relations between Belarus and the Baltic States.”


This example shows how accounts identified as participating in coordinated inauthentic behaviour were presenting themselves as journalists and activists while spreading operation narratives (T0097.102: Journalist Persona, T0097.103: Activist Persona).

Additionally, analysts at Meta identified accounts which were participating in coordinated inauthentic behaviour that had likely used AI-Generated images as their profile pictures (T0145.002: AI-Generated Account Imagery). | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Kippenstien used his existing blog on substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.
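Password-protecting the RAR file is what let the payload slip past mail scanning, since the archive’s contents cannot be inspected in transit; once the password is known, however, the member list alone is usually enough to triage the file without extracting anything. A minimal sketch using the third-party rarfile package (`pip install rarfile`; the filename and password are hypothetical):

```python
import rarfile  # third-party; also requires an unrar backend on PATH

# File extensions that should never appear in a benign "draft report" archive
SUSPICIOUS = {".lnk", ".exe", ".scr", ".js", ".vbs", ".hta"}

def triage_archive(path, password=None):
    """List archive members and flag shortcut/executable payloads without extracting."""
    flagged = []
    with rarfile.RarFile(path) as rf:
        if password:
            rf.setpassword(password)
        for info in rf.infolist():
            ext = "." + info.filename.rsplit(".", 1)[-1].lower()
            if ext in SUSPICIOUS:
                flagged.append(info.filename)
    return flagged

# flagged = triage_archive("draft_report.rar", password="from_followup_email")
```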


In this example, threat actors created an email address on a lookalike domain impersonating an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | diff --git a/generated_pages/techniques/T0097.105.md b/generated_pages/techniques/T0097.105.md index 030edc5..652869a 100644 --- a/generated_pages/techniques/T0097.105.md +++ b/generated_pages/techniques/T0097.105.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | diff --git a/generated_pages/techniques/T0097.108.md b/generated_pages/techniques/T0097.108.md index 5cff332..d7f4075 100644 --- a/generated_pages/techniques/T0097.108.md +++ b/generated_pages/techniques/T0097.108.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | +| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | | [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | “The sixth [website to repost a confirmed false narrative investigated in this report] is an apparent think tank, the Center for Global Strategic Monitoring. This website describes itself, in English apparently written by a non-native speaker, as a “nonprofit and nonpartisan research and analysis institution dedicated to providing insights of the think tank community publications”. It does, indeed, publish think-tank reports on issues such as Turkey and US-China relations; however, the reports are the work of other think tanks, often unattributed (the two mentioned in this sentence were actually produced by the Brookings Institution, although the website makes no mention of the fact). It also fails to provide an address, or any other contact details other than an email, and its (long) list of experts includes entries apparently copied and pasted from other institutions. Thus, the “think tank” website which shared the fake story appears to be a fake itself.” In this example a website which amplified a false narrative presented itself as a think tank (T0097.204: Think Tank Persona).

This is an entirely fabricated persona (T0143.002: Fabricated Persona); it republished content from other think tanks without attribution (T0084.002: Plagiarise Content) and fabricated experts (T0097.108: Expert Persona, T0143.002: Fabricated Persona) to make it more believable that they were a real think tank. | diff --git a/generated_pages/techniques/T0097.109.md b/generated_pages/techniques/T0097.109.md index 76cc841..68ce922 100644 --- a/generated_pages/techniques/T0097.109.md +++ b/generated_pages/techniques/T0097.109.md @@ -7,6 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | +| [I00064 Tinder nightmares: the promise and peril of political bots](../../generated_pages/incidents/I00064.md) | “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]

“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”
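One crude but useful signal for this kind of automation is sheer send volume: 30,000–40,000 messages from a small pool of co-opted profiles in the days before the election implies per-account rates no human dating-app user sustains. A minimal sketch of that heuristic over an exported message log (the CSV, its columns, and the threshold are assumptions for illustration):

```python
import csv
from collections import Counter

HUMAN_DAILY_CEILING = 100  # assumed plausible upper bound for a real user

def flag_high_volume_senders(log_path, days):
    """Count messages per sender and flag accounts exceeding a human-plausible rate."""
    counts = Counter()
    with open(log_path, newline="", encoding="utf-8") as f:
        for row in csv.DictReader(f):  # expects a 'sender' column
            counts[row["sender"]] += 1
    return {acct: n for acct, n in counts.items()
            if n / days > HUMAN_DAILY_CEILING}

# suspects = flag_high_volume_senders("tinder_messages.csv", days=7)
```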


In this example people volunteered access to their real dating-app accounts, which the actors then used to automate political messaging. The actors maintained each account’s existing persona, presenting themselves as potential romantic suitors for legitimate platform users (T0097.109: Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). | +| [I00089 Hackers Use Fake Facebook Profiles of Attractive Women to Spread Viruses, Steal Passwords](../../generated_pages/incidents/I00089.md) | “On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.

“In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.

“Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.

“In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. 

“In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second, booby-trapped, chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””


In this example threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). | diff --git a/generated_pages/techniques/T0097.110.md b/generated_pages/techniques/T0097.110.md index ef605b9..79825a4 100644 --- a/generated_pages/techniques/T0097.110.md +++ b/generated_pages/techniques/T0097.110.md @@ -7,10 +7,10 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00065 'Ghostwriter' Influence Campaign: Unknown Actors Leverage Website Compromises and Fabricated Content to Push Narratives Aligned With Russian Security Interests](../../generated_pages/incidents/I00065.md) | _”Overall, narratives promoted in the five operations appear to represent a concerted effort to discredit the ruling political coalition, widen existing domestic political divisions and project an image of coalition disunity in Poland. In each incident, content was primarily disseminated via Twitter, Facebook, and/ or Instagram accounts belonging to Polish politicians, all of whom have publicly claimed their accounts were compromised at the times the posts were made."_

This example demonstrates how threat actors can use compromised accounts to distribute inauthentic content while exploiting the legitimate account holder’s persona (T0097.110: Party Official Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.005: Compromised). | +| [I00065 'Ghostwriter' Influence Campaign: Unknown Actors Leverage Website Compromises and Fabricated Content to Push Narratives Aligned With Russian Security Interests](../../generated_pages/incidents/I00065.md) | _”Overall, narratives promoted in the five operations appear to represent a concerted effort to discredit the ruling political coalition, widen existing domestic political divisions and project an image of coalition disunity in Poland. In each incident, content was primarily disseminated via Twitter, Facebook, and/ or Instagram accounts belonging to Polish politicians, all of whom have publicly claimed their accounts were compromised at the times the posts were made."_

This example demonstrates how threat actors can use compromised accounts to distribute inauthentic content while exploiting the legitimate account holder’s persona (T0097.110: Party Official Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.005: Compromised Asset). | | [I00075 How Russia Meddles Abroad for Profit: Cash, Trolls and a Cult Leader](../../generated_pages/incidents/I00075.md) | “In the campaign’s final weeks, Pastor Mailhol said, the team of Russians made a request: Drop out of the race and support Mr. Rajoelina. He refused.

“The Russians made the same proposal to the history professor running for president, saying, “If you accept this deal you will have money” according to Ms. Rasamimanana, the professor’s campaign manager.

When the professor refused, she said, the Russians created a fake Facebook page that mimicked his official page and posted an announcement on it that he was supporting Mr. Rajoelina.”


In this example actors created online accounts styled to look like official pages to trick targets into thinking that the presidential candidate announced that they had dropped out of the election (T0097.110: Party Official Persona, T0143.003: Impersonated Persona). | | [I00076 Network of Social Media Accounts Impersonates U.S. Political Candidates, Leverages U.S. and Israeli Media in Support of Iranian Interests](../../generated_pages/incidents/I00076.md) | “Some Twitter accounts in the network [of inauthentic accounts attributed to Iran] impersonated Republican political candidates that ran for House of Representatives seats in the 2018 U.S. congressional midterms. These accounts appropriated the candidates’ photographs and, in some cases, plagiarized tweets from the real individuals’ accounts. Aside from impersonating real U.S. political candidates, the behavior and activity of these accounts resembled that of the others in the network.

“For example, the account @livengood_marla impersonated Marla Livengood, a 2018 candidate for California’s 9th Congressional District, using a photograph of Livengood and a campaign banner for its profile and background pictures. The account began tweeting on Sept. 24, 2018, with its first tweet plagiarizing one from Livengood’s official account earlier that month”

[...]

“In another example, the account @ButlerJineea impersonated Jineea Butler, a 2018 candidate for New York’s 13th Congressional District, using a photograph of Butler for its profile picture and incorporating her campaign slogans into its background picture, as well as claiming in its Twitter bio to be a “US House candidate, NY-13” and linking to Butler’s website, jineeabutlerforcongress[.]com.”


In this example actors impersonated existing political candidates (T0097.110: Member of Political Party Persona, T0143.003: Impersonated Persona), strengthening the impersonation by copying legitimate accounts’ imagery (T0145.001: Copy Account Imagery), and copying its previous posts (T0084.002: Plagiarise Content). | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | diff --git a/generated_pages/techniques/T0097.111.md b/generated_pages/techniques/T0097.111.md index 67e1c09..57b9b6a 100644 --- a/generated_pages/techniques/T0097.111.md +++ b/generated_pages/techniques/T0097.111.md @@ -10,7 +10,7 @@ | [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.

[...]

The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”


In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). | | [I00084 Russia turns its diplomats into disinformation warriors](../../generated_pages/incidents/I00084.md) | “After the European Union banned Kremlin-backed media outlets and social media giants demoted their posts for peddling falsehoods about the war in Ukraine, Moscow has turned to its cadre of diplomats, government spokespeople and ministers — many of whom have extensive followings on social media — to promote disinformation about the conflict in Eastern Europe, according to four EU and United States officials.”

In this example authentic Russian government officials used their own accounts to promote false narratives (T0143.001: Authentic Persona, T0097.111: Government Official Persona).

The use of accounts managed by authentic government officials and diplomats to spread false narratives makes it harder for platforms to enforce content moderation, because of the political ramifications they may face for censoring elected officials (T0131: Exploit TOS/Content Moderation). For example, Twitter previously argued that official channels of world leaders are not removed due to the high public interest associated with their activities. | | [I00085 China’s large-scale media push: Attempts to influence Swedish media](../../generated_pages/incidents/I00085.md) | “Four media companies – Svenska Dagbladet, Expressen, Sveriges Radio, and Sveriges Television – stated that they had been contacted by the Chinese embassy on several occasions, and that they, for instance, had been criticized on their publications, both by letters and e-mails.

The media company Svenska Dagbladet, had been contacted on several occasions in the past two years, including via e-mails directly from the Chinese ambassador to Sweden. Several times, China and the Chinese ambassador had criticized the media company’s publications regarding the conditions in China. Individual reporters also reported having been subjected to criticism.

The tabloid Expressen had received several letters and e-mails from the embassy, e-mails containing criticism and threatening formulations regarding the coverage of the Swedish book publisher Gui Minhai, who has been imprisoned in China since 2015. Formulations such as “media tyranny” could be found in the e-mails.”


In this case, the Chinese ambassador is using their official role (T0143.001: Authentic Persona, T0097.111: Government Official Persona) to try to influence the Swedish press. A government official trying to interfere in other countries’ media activities could be a violation of press freedom. In this specific case, the Chinese diplomats are trying to silence criticism against China (T0139.002: Silence). | -| [I00093 China Falsely Denies Disinformation Campaign Targeting Canada’s Prime Minister](../../generated_pages/incidents/I00093.md) | “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | +| [I00093 China Falsely Denies Disinformation Campaign Targeting Canada’s Prime Minister](../../generated_pages/incidents/I00093.md) | “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | diff --git a/generated_pages/techniques/T0097.202.md b/generated_pages/techniques/T0097.202.md index e839d26..ba71199 100644 --- a/generated_pages/techniques/T0097.202.md +++ b/generated_pages/techniques/T0097.202.md @@ -13,8 +13,8 @@ | [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | “On January 4 [2017], a little-known news site based in Donetsk, Ukraine published an article claiming that the United States was sending 3,600 tanks to Europe as part of “the NATO war preparation against Russia”.

“Like much fake news, this story started with a grain of truth: the US was about to reinforce its armored units in Europe. However, the article converted literally thousands of other vehicles — including hundreds of Humvees and trailers — into tanks, building the US force into something 20 times more powerful than it actually was.

“The story caught on online. Within three days it had been repeated by a dozen websites in the United States, Canada and Europe, and shared some 40,000 times. It was translated into Norwegian; quoted, unchallenged, by Russian state news agency RIA Novosti; and spread among Russian-language websites.

“It was also an obvious fake, as any Google news search would have revealed. Yet despite its evident falsehood, it spread widely, and not just in directly Kremlin-run media. Tracking the spread of this fake therefore shines a light on the wider question of how fake stories are dispersed.”


Russian state news agency RIA Novosti presents itself as a news outlet (T0097.202: News Outlet Persona). RIA Novosti is a real news outlet (T0143.001: Authentic Persona), but it did not carry out the basic verification of the narrative it published that is implicitly expected of institutions presenting themselves as news outlets.

We can’t know how or why this narrative ended up being published by RIA Novosti, but we know that it presented a distorted reality as authentic information (T0023: Distort Facts), claiming that the US was sending 3,600 tanks rather than 3,600 vehicles, of which only ~180 were tanks. |

“On its official website, the Chinese marketing firm boasted that they were in contact with news organizations across the globe, including one in South Korea called the “Chungcheng Times.” According to the joint team, this outlet is a fictional news organization created by the offending company. The Chinese company sought to disguise the site’s true identity and purpose by altering the name attached to it by one character—making it very closely resemble the name of a legitimate outlet operating out of Chungchengbuk-do.

“The marketing firm also established a news organization under the Korean name “Gyeonggido Daily,” which closely resembles legitimate news outlets operating out of Gyeonggi province such as “Gyeonggi Daily,” “Daily Gyeonggi Newspaper,” and “Gyeonggi N Daily.” One of the fake news sites was named “Incheon Focus,” a title that could be easily mistaken for the legitimate local news outlet, “Focus Incheon.” Furthermore, the Chinese marketing company operated two fake news sites with names identical to two separate local news organizations, one of which ceased operations in December 2022.

“In total, fifteen out of eighteen Chinese fake news sites incorporated the correct names of real regions in their fake company names. “If the operators had created fake news sites similar to major news organizations based in Seoul, however, the intended deception would have easily been uncovered,” explained Song Tae-eun, an assistant professor in the Department of National Security & Unification Studies at the Korea National Diplomatic Academy, to The Readable. “There is also the possibility that they are using the regional areas as an attempt to form ties with the local community; that being the government, the private sector, and religious communities.””


The firm styled its news sites to resemble existing local news outlets in their target regions (T0097.201: Local Institution Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona). |

The SDA’s deception work first surfaced in 2022, likely almost immediately after Doppelganger got off the ground. In April of that year, Meta, the parent company of Facebook and Instagram, disclosed in a quarterly report that it had removed from its platforms “a network of about 200 accounts operated from Russia.” By August 2022, German investigative journalists revealed that they had discovered forgeries of about 30 news sites, including many of the country’s biggest media outlets—Frankfurter Allgemeine, Der Spiegel, and Bild—but also Britain’s Daily Mail and France’s 20 Minutes. The sites had deceptive URLs such as www-dailymail-co-uk.dailymail.top.


As part of the SDA’s work, they created many websites which impersonated existing media outlets. The sites used domain impersonation tactics to increase the perceived legitimacy of their impersonations (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.003: Website Hosting Platform, T0149.003: Lookalike Domain). | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, and posed as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | diff --git a/generated_pages/techniques/T0097.203.md b/generated_pages/techniques/T0097.203.md index 24b0559..047bae2 100644 --- a/generated_pages/techniques/T0097.203.md +++ b/generated_pages/techniques/T0097.203.md @@ -8,7 +8,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00084 Russia turns its diplomats into disinformation warriors](../../generated_pages/incidents/I00084.md) | “[Russia’s social media] reach isn't the same as Russian state media, but they are trying to recreate what RT and Sputnik had done," said one EU official involved in tracking Russian disinformation. "It's a coordinated effort that goes beyond social media and involves specific websites.”

“Central to that wider online playbook is a Telegram channel called Warfakes and an affiliated website. Since the beginning of the conflict, that social media channel has garnered more than 725,000 members and repeatedly shares alleged fact-checks aimed at debunking Ukrainian narratives, using language similar to Western-style fact-checking outlets.”


In this example a Telegram channel (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) was established which presented itself as a source of fact checks (T0097.203: Fact Checking Organisation Persona). | -| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | diff --git a/generated_pages/techniques/T0097.204.md b/generated_pages/techniques/T0097.204.md index 268e730..a2135e5 100644 --- a/generated_pages/techniques/T0097.204.md +++ b/generated_pages/techniques/T0097.204.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00072 Behind the Dutch Terror Threat Video: The St. Petersburg "Troll Factory" Connection](../../generated_pages/incidents/I00072.md) | “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian-speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). |

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian-speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
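A language-gated persona like Katehon’s can be surfaced programmatically. The sketch below is a minimal editorial illustration under stated assumptions (the third-party `requests` library; a placeholder URL, not a real investigation target): it fetches the same page under different `Accept-Language` headers and compares content hashes. Geoblocking proper (T0155.004) keys off the requester’s location rather than language preference, so confirming it would additionally require requests from different geographic vantage points.

```python
# Minimal sketch: detect audience-dependent content by requesting the same
# URL with different Accept-Language headers and comparing response hashes.
# Assumes the third-party `requests` library; the URL is a placeholder.
import hashlib

import requests


def content_fingerprint(url: str, language: str) -> str:
    """Fetch `url` as a client preferring `language` and hash the body."""
    response = requests.get(url, headers={"Accept-Language": language}, timeout=10)
    response.raise_for_status()
    return hashlib.sha256(response.content).hexdigest()


if __name__ == "__main__":
    url = "https://example.org/"  # placeholder, not a real investigation target
    english = content_fingerprint(url, "en-US,en;q=0.9")
    russian = content_fingerprint(url, "ru-RU,ru;q=0.9")
    if english != russian:
        print("Language-dependent content served; compare both versions manually.")
    else:
        print("Same content served to both language audiences.")
```

A differing hash only shows that the two versions are not byte-identical; dynamic elements such as timestamps can make any two fetches differ, so in practice an investigator would compare extracted text and still read both versions to judge whether the difference amounts to a persona switch.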

““Our purpose and mission are to provide high-quality analysis at a time when we are faced with a multitude of crises, a collapsing global economy, imperialist wars, environmental disasters, corporate greed, terrorism, deceit, GMO food, a migration crisis and a crackdown on small farmers and ranchers.””


In this example Alphabet’s technical indicators allowed them to assert that GI Analytics, which presented itself as a think tank, was a fabricated institution associated with Russia’s Internet Research Agency (T0097.204: Think Tank Persona, T0143.002: Fabricated Persona). | | [I00078 Meta’s September 2020 Removal of Coordinated Inauthentic Behavior](../../generated_pages/incidents/I00078.md) | “[Meta has] removed one Page, five Facebook accounts, one Group and three Instagram accounts for foreign or government interference which is coordinated inauthentic behavior on behalf of a foreign or government entity. This small network originated in Russia and focused primarily on Turkey and Europe, and also on the United States.

“This operation relied on fake accounts — some of which had been already detected and removed by our automated systems — to manage their Page and their Group, and to drive people to their site purporting to be an independent think-tank based primarily in Turkey. These accounts posed as locals based in Turkey, Canada and the US. They also recruited people to write for their website. This network had almost no following on our platforms when we removed it.”


Meta identified that a network of accounts originating in Russia was driving people off platform to a site which presented itself as a think-tank (T0097.204: Think Tank Persona). Meta did not make an attribution about the authenticity of this off-site think tank, so neither T0143.001: Authentic Persona nor T0143.002: Fabricated Persona is used here.

Meta had access to technical data for accounts on its platform, and asserted that they were fabricated individuals posing as locals who recruited targets to write content for their website (T0097.101: Local Persona, T0097.106: Recruiter Persona, T0143.002: Fabricated Persona). | | [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | “The sixth [website to repost a confirmed false narrative investigated in this report] is an apparent think tank, the Center for Global Strategic Monitoring. This website describes itself, in English apparently written by a non-native speaker, as a “nonprofit and nonpartisan research and analysis institution dedicated to providing insights of the think tank community publications”. It does, indeed, publish think-tank reports on issues such as Turkey and US-China relations; however, the reports are the work of other think tanks, often unattributed (the two mentioned in this sentence were actually produced by the Brookings Institution, although the website makes no mention of the fact). It also fails to provide an address, or any other contact details other than an email, and its (long) list of experts includes entries apparently copied and pasted from other institutions. Thus, the “think tank” website which shared the fake story appears to be a fake itself.” In this example a website which amplified a false narrative presented itself as a think tank (T0097.204: Think Tank Persona).

This is an entirely fabricated persona (T0143.002: Fabricated Persona); it republished content from other think tanks without attribution (T0084.002: Plagiarise Content) and fabricated experts (T0097.108: Expert Persona, T0143.002: Fabricated Persona) to make it more believable that it was a real think tank. | diff --git a/generated_pages/techniques/T0097.205.md b/generated_pages/techniques/T0097.205.md index 9530f2b..7985623 100644 --- a/generated_pages/techniques/T0097.205.md +++ b/generated_pages/techniques/T0097.205.md @@ -9,7 +9,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00070 Eli Lilly Clarifies It’s Not Offering Free Insulin After Tweet From Fake Verified Account—As Chaos Unfolds On Twitter](../../generated_pages/incidents/I00070.md) | “Twitter Blue launched [November 2022], giving any users who pay $8 a month the ability to be verified on the site, a feature previously only available to public figures, government officials and journalists as a way to show they are who they claim to be.

“[A day after the launch], an account with the handle @EliLillyandCo labeled itself with the name “Eli Lilly and Company,” and by using the same logo as the company in its profile picture and with the verification checkmark, was indistinguishable from the real company (the picture has since been removed and the account has labeled itself as a parody profile).

The parody account tweeted “we are excited to announce insulin is free now.””


In this example an account impersonated the pharmaceutical company Eli Lilly (T0097.205: Business Persona, T0143.003: Impersonated Persona) by copying its name and profile picture (T0145.001: Copy Account Imagery) and by paying for verification. |
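Impersonations of this kind can be screened for by normalising candidate account names against the brand name. The sketch below is a minimal illustration using only Python’s standard library; the candidate handles and the 0.8 threshold are assumptions for demonstration, not data from the incident.

```python
# Minimal sketch: flag account handles that closely match a brand name once
# case, spacing and punctuation are stripped. Handles and threshold are
# illustrative assumptions; uses only the standard library.
import re
from difflib import SequenceMatcher


def normalise(text: str) -> str:
    """Lowercase and drop everything except letters and digits."""
    return re.sub(r"[^a-z0-9]", "", text.casefold())


def name_similarity(candidate: str, brand: str) -> float:
    """Similarity in [0, 1] between the normalised candidate and brand."""
    return SequenceMatcher(None, normalise(candidate), normalise(brand)).ratio()


if __name__ == "__main__":
    brand = "Eli Lilly and Company"
    for handle in ["EliLillyandCo", "Eli_Lilly_Official", "WeatherDaily"]:
        score = name_similarity(handle, brand)
        print(f"{handle}: similarity={score:.2f} flagged={score >= 0.8}")
```

Normalisation does most of the work here: a handle like “EliLillyandCo” contains no typo, so homoglyph or edit-distance checks against an official handle alone would miss it, while comparison against the stripped-down company name scores it highly.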

“The operation used a company called London New Europe Media, registered to an address on the upmarket Kensington High Street, that attempted to recruit real people to help it produce content. It is not clear how many people it ultimately recruited.

“London New Europe Media also “tried to engage individuals to record English-language videos scripted by the network,” in one case leading to a recording criticizing the United States being posted on YouTube, said Meta”.


In this example a front company was used (T0097.205: Business Persona) to enable actors to recruit targets for producing content (T0097.106: Recruiter Persona, T0143.002: Fabricated Persona). | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | diff --git a/generated_pages/techniques/T0097.207.md b/generated_pages/techniques/T0097.207.md index e6f140a..63f5d43 100644 --- a/generated_pages/techniques/T0097.207.md +++ b/generated_pages/techniques/T0097.207.md @@ -8,7 +8,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00069 Uncharmed: Untangling Iran's APT42 Operations](../../generated_pages/incidents/I00069.md) | “[Iranian state-sponsored cyber espionage actor] APT42 cloud operations attack lifecycle can be described in details as follows:

- “Social engineering schemes involving decoys and trust building, which includes masquerading as legitimate NGOs and conducting ongoing correspondence with the target, sometimes lasting several weeks.
- The threat actor masqueraded as well-known international organizations in the legal and NGO fields and sent emails from domains typosquatting the original NGO domains, for example aspenlnstitute[.]org.
- The Aspen Institute became aware of this spoofed domain and collaborated with industry partners, including blocking it in SafeBrowsing, thus protecting users of Google Chrome and additional browsers.
- To increase their credibility, APT42 impersonated high-ranking personnel working at the aforementioned organizations when creating the email personas.
- APT42 enhanced their campaign credibility by using decoy material inviting targets to legitimate and relevant events and conferences. In one instance, the decoy material was hosted on an attacker-controlled SharePoint folder, accessible only after the victim entered their credentials. Mandiant did not identify malicious elements in the files, suggesting they were used solely to gain the victim’s trust.”


In this example APT42, an Iranian state-sponsored cyber espionage actor, created a domain impersonating the existing NGO The Aspen Institute (T0143.003: Impersonated Persona, T0097.207: NGO Persona). They increased the perceived legitimacy of the impersonation by also impersonating high-ranking employees of the NGO (T0097.100: Individual Persona, T0143.003: Impersonated Persona). | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i>

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | diff --git a/generated_pages/techniques/T0097.208.md b/generated_pages/techniques/T0097.208.md index 743aab1..93b5f0c 100644 --- a/generated_pages/techniques/T0097.208.md +++ b/generated_pages/techniques/T0097.208.md @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00074 The Tactics & Tropes of the Internet Research Agency](../../generated_pages/incidents/I00074.md) | “The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”

In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). | -| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only * after * things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | +| [I00074 The Tactics & Tropes of the Internet Research Agency](../../generated_pages/incidents/I00074.md) | “The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”

In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). | +| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | diff --git a/generated_pages/techniques/T0101.md b/generated_pages/techniques/T0101.md index dffd11f..462163b 100644 --- a/generated_pages/techniques/T0101.md +++ b/generated_pages/techniques/T0101.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | diff --git a/generated_pages/techniques/T0114.md b/generated_pages/techniques/T0114.md index 4a881d3..b8d7d99 100644 --- a/generated_pages/techniques/T0114.md +++ b/generated_pages/techniques/T0114.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| diff --git a/generated_pages/techniques/T0115.md b/generated_pages/techniques/T0115.md index db13b81..51405d2 100644 --- a/generated_pages/techniques/T0115.md +++ b/generated_pages/techniques/T0115.md @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | | [I00104 Macron Campaign Hit With “Massive and Coordinated” Hacking Attack](../../generated_pages/incidents/I00104.md) | A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie [sic] and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | -| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | diff --git a/generated_pages/techniques/T0121.001.md b/generated_pages/techniques/T0121.001.md index 8abdf49..19d956d 100644 --- a/generated_pages/techniques/T0121.001.md +++ b/generated_pages/techniques/T0121.001.md @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [I00112 Patreon allows disinformation and conspiracies to be monetised in Spain](../../generated_pages/incidents/I00112.md) | In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by EU DisinfoLab in the mentioned previous report.

Some creators display links that redirect users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on its platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00112 Patreon allows disinformation and conspiracies to be monetised in Spain](../../generated_pages/incidents/I00112.md) | In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by EU DisinfoLab in the mentioned previous report.

Some creators display links that redirect users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on its platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). | diff --git a/generated_pages/techniques/T0121.md b/generated_pages/techniques/T0121.md index e6fae26..ec1c67f 100644 --- a/generated_pages/techniques/T0121.md +++ b/generated_pages/techniques/T0121.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | diff --git a/generated_pages/techniques/T0122.md index 4596f0a..202df46 100644 --- a/generated_pages/techniques/T0122.md +++ b/generated_pages/techniques/T0122.md @@ -7,13 +7,13 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely having been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | | [I00104 Macron Campaign Hit With “Massive and Coordinated” Hacking Attack](../../generated_pages/incidents/I00104.md) | A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie [sic] and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. The campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | -| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI-generated images, changed to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0097.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a paid account was newly created on X and used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | +| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had been posting AI-generated images, changed to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0097.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a paid account was newly created on X and used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | | [I00123 The Extreme Right on Steam](../../generated_pages/incidents/I00123.md) | ISD conducted an investigation into the usage of social groups on Steam. Steam is an online platform used to buy and sell digital games, and includes the Steam community feature, which “allows users to find friends and join groups and discussion forums, while also offering in-game voice and text chat”. Actors have used Steam’s social capabilities to enable online harm campaigns:

A number of groups were observed encouraging members to join conversations on outside platforms. These include links to Telegram channels connected to white supremacist marches, and media outlets, forums and Discord servers run by neo-Nazis.

[...]

This off-ramping activity demonstrates how rather than sitting in isolation, Steam fits into the wider extreme right wing online ecosystem, with Steam groups acting as hubs for communities and organizations which span multiple platforms. Accordingly, although the platform appears to fill a specific role in the building and strengthening of communities with similar hobbies and interests, it is suggested that analysis seeking to determine the risk of these communities should focus on their activity across platforms


Social groups on Steam were used to drive new people to other neo-Nazi-controlled community assets (T0122: Direct Users to Alternative Platforms, T0152.009: Software Delivery Platform, T0151.002: Online Community Group). | -| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | +| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | diff --git a/generated_pages/techniques/T0124.md index 2ce65ee..a6d1f8b 100644 --- a/generated_pages/techniques/T0124.md +++ b/generated_pages/techniques/T0124.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes in content posted to communities on Reddit (called subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user, Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes in content posted to communities on Reddit (called subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States “is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user, Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | diff --git a/generated_pages/techniques/T0129.005.md index 6bf8c5f..697de8b 100644 --- a/generated_pages/techniques/T0129.005.md +++ b/generated_pages/techniques/T0129.005.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into how these operations were run:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into how these operations were run:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | diff --git a/generated_pages/techniques/T0129.006.md b/generated_pages/techniques/T0129.006.md index 72ee752..3e05e66 100644 --- a/generated_pages/techniques/T0129.006.md +++ b/generated_pages/techniques/T0129.006.md @@ -8,8 +8,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00075 How Russia Meddles Abroad for Profit: Cash, Trolls and a Cult Leader](../../generated_pages/incidents/I00075.md) | “Only three of the Russian operatives identified by local hires of the campaign responded to requests for comment. All acknowledged visiting Madagascar last year, but only one admitted working as a pollster on behalf of the president.

“The others said they were simply tourists. Pyotr Korolyov, described as a sociologist on one spreadsheet, spent much of the summer and fall of 2018 hunched over a computer, deep in polling data at La Résidence Ankerana, a hotel the Russians used as their headquarters, until he was hospitalized with the measles, according to one person who worked with him.

“In an email exchange, Mr. Korolyov confirmed that he had come down with the measles, but rejected playing a role in a Russian operation. He did defend the idea of one, though.

““Russia should influence elections around the world, the same way the United States influences elections,” he wrote. “Sooner or later Russia will return to global politics as a global player,” he added. “And the American establishment will just have to accept that.””


This behaviour matches T0129.006: Deny Involvement because the actors contacted by journalists denied that they had participated in election interference (in spite of the evidence to the contrary). | -| [I00093 China Falsely Denies Disinformation Campaign Targeting Canada’s Prime Minister](../../generated_pages/incidents/I00093.md) | “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00093 China Falsely Denies Disinformation Campaign Targeting Canada’s Prime Minister](../../generated_pages/incidents/I00093.md) | “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” of merely being scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | diff --git a/generated_pages/techniques/T0143.001.md b/generated_pages/techniques/T0143.001.md index 0aab81f..f2a1fe2 100644 --- a/generated_pages/techniques/T0143.001.md +++ b/generated_pages/techniques/T0143.001.md @@ -12,9 +12,9 @@ | [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | “On January 4 [2017], a little-known news site based in Donetsk, Ukraine published an article claiming that the United States was sending 3,600 tanks to Europe as part of “the NATO war preparation against Russia”.

“Like much fake news, this story started with a grain of truth: the US was about to reinforce its armored units in Europe. However, the article converted literally thousands of other vehicles — including hundreds of Humvees and trailers — into tanks, building the US force into something 20 times more powerful than it actually was.

“The story caught on online. Within three days it had been repeated by a dozen websites in the United States, Canada and Europe, and shared some 40,000 times. It was translated into Norwegian; quoted, unchallenged, by Russian state news agency RIA Novosti; and spread among Russian-language websites.

“It was also an obvious fake, as any Google news search would have revealed. Yet despite its evident falsehood, it spread widely, and not just in directly Kremlin-run media. Tracking the spread of this fake therefore shines a light on the wider question of how fake stories are dispersed.”


Russian state news agency RIA Novosti presents itself as a news outlet (T0097.202: News Outlet Persona). RIA Novosti is a real news outlet (T0143.001: Authentic Persona), but it did not carry out the basic investigation into the veracity of the narrative it published that is implicitly expected of institutions presenting themselves as news outlets.

We can’t know how or why this narrative ended up being published by RIA Novosti, but we know that it presented a distorted reality as authentic information (T0023: Distort Facts), claiming that the US was sending 3,600 tanks, instead of 3,600 vehicles which included ~180 tanks. | | [I00084 Russia turns its diplomats into disinformation warriors](../../generated_pages/incidents/I00084.md) | “After the European Union banned Kremlin-backed media outlets and social media giants demoted their posts for peddling falsehoods about the war in Ukraine, Moscow has turned to its cadre of diplomats, government spokespeople and ministers — many of whom have extensive followings on social media — to promote disinformation about the conflict in Eastern Europe, according to four EU and United States officials.”

In this example authentic Russian government officials used their own accounts to promote false narratives (T0143.001: Authentic Persona, T0097.111: Government Official Persona).

The use of accounts managed by authentic government officials and diplomats to spread false narratives makes it harder for platforms to enforce content moderation, because of the political ramifications they may face for censoring elected officials (T0131: Exploit TOS/Content Moderation). For example, Twitter previously argued that official channels of world leaders are not removed due to the high public interest associated with their activities. | | [I00085 China’s large-scale media push: Attempts to influence Swedish media](../../generated_pages/incidents/I00085.md) | “Four media companies – Svenska Dagbladet, Expressen, Sveriges Radio, and Sveriges Television – stated that they had been contacted by the Chinese embassy on several occasions, and that they, for instance, had been criticized on their publications, both by letters and e-mails.

The media company Svenska Dagbladet, had been contacted on several occasions in the past two years, including via e-mails directly from the Chinese ambassador to Sweden. Several times, China and the Chinese ambassador had criticized the media company’s publications regarding the conditions in China. Individual reporters also reported having been subjected to criticism.

The tabloid Expressen had received several letters and e-mails from the embassy, e-mails containing criticism and threatening formulations regarding the coverage of the Swedish book publisher Gui Minhai, who has been imprisoned in China since 2015. Formulations such as “media tyranny” could be found in the e-mails.”


In this case, the Chinese ambassador is using their official role (T0143.001: Authentic Persona, T0097.111: Government Official Persona) to try to influence the Swedish press. A government official trying to interfere in other countries' media activities could be a violation of press freedom. In this specific case, the Chinese diplomats are trying to silence criticism against China (T0139.002: Silence). | -| [I00093 China Falsely Denies Disinformation Campaign Targeting Canada’s Prime Minister](../../generated_pages/incidents/I00093.md) | “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China was identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | -| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | +| [I00093 China Falsely Denies Disinformation Campaign Targeting Canada’s Prime Minister](../../generated_pages/incidents/I00093.md) | “On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.

“The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”

“A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.

“That is false.

“The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.

“The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””


In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). | +| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | diff --git a/generated_pages/techniques/T0143.002.md b/generated_pages/techniques/T0143.002.md index d433755..35b1c27 100644 --- a/generated_pages/techniques/T0143.002.md +++ b/generated_pages/techniques/T0143.002.md @@ -19,10 +19,10 @@ | [I00089 Hackers Use Fake Facebook Profiles of Attractive Women to Spread Viruses, Steal Passwords](../../generated_pages/incidents/I00089.md) | “On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.

“In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.

“Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.

“In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. 

“In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second , booby-trapped, chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””


In this example threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). | | [I00091 Facebook uncovers Chinese network behind fake expert](../../generated_pages/incidents/I00091.md) | “Earlier in July [2021], an account posing as a Swiss biologist called Wilson Edwards had made statements on Facebook and Twitter that the United States was applying pressure on the World Health Organization scientists who were studying the origins of Covid-19 in an attempt to blame the virus on China.

“State media outlets, including CGTN, Shanghai Daily and Global Times, had cited the so-called biologist based on his Facebook profile.

“However, the Swiss embassy said in August that the person likely did not exist, as the Facebook account was opened only two weeks prior to its first post and only had three friends.

“It added "there was no registry of a Swiss citizen with the name "Wilson Edwards" and no academic articles under the name", and urged Chinese media outlets to take down any mention of him.

[...]

“It also said that his profile photo also appeared to have been generated using machine-learning capabilities.”


In this example an account created on Facebook presented itself as a Swiss biologist to present a narrative related to COVID-19 (T0143.002: Fabricated Persona, T0097.106: Researcher Persona). It used an AI-Generated profile picture to disguise itself (T0145.002: AI-Generated Account Imagery). | | [I00095 Meta: Chinese disinformation network was behind London front company recruiting content creators](../../generated_pages/incidents/I00095.md) | “A Chinese disinformation network operating fictitious employee personas across the internet used a front company in London to recruit content creators and translators around the world, according to Meta.

“The operation used a company called London New Europe Media, registered to an address on the upmarket Kensington High Street, that attempted to recruit real people to help it produce content. It is not clear how many people it ultimately recruited.

“London New Europe Media also “tried to engage individuals to record English-language videos scripted by the network,” in one case leading to a recording criticizing the United States being posted on YouTube, said Meta”.


In this example a front company was used (T0097.205: Business Persona) to enable actors to recruit targets for producing content (T0097.106: Recruiter Persona, T0143.002: Fabricated Persona). | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | -| [I00121 Operation Overload: how pro-Russian actors flood newsrooms with fake content and seek to divert their efforts](../../generated_pages/incidents/I00121.md) | The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | -| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [I00121 Operation Overload: how pro-Russian actors flood newsrooms with fake content and seek to divert their efforts](../../generated_pages/incidents/I00121.md) | The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.
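
A minimal sketch of the header check described above, assuming messages saved as .eml files; Received header formats vary widely between providers, so treat the output as indicative rather than authoritative:

```python
import re
from email import policy
from email.parser import BytesParser

IPV4 = re.compile(r"\b(?:\d{1,3}\.){3}\d{1,3}\b")

def received_ips(path):
    # Parse the message and list IPv4 addresses from its Received headers.
    # The earliest hop appears last; a private-range or residential address
    # there suggests a desktop client, while webmail submission (as with the
    # Gmail interface) typically shows only the provider's own relays.
    with open(path, "rb") as fh:
        msg = BytesParser(policy=policy.default).parse(fh)
    return [ip for header in msg.get_all("Received", []) for ip in IPV4.findall(header)]
```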


In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | +| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | diff --git a/generated_pages/techniques/T0143.003.md b/generated_pages/techniques/T0143.003.md index 645007a..49cce8d 100644 --- a/generated_pages/techniques/T0143.003.md +++ b/generated_pages/techniques/T0143.003.md @@ -7,21 +7,21 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00064 Tinder nightmares: the promise and peril of political bots](../../generated_pages/incidents/I00064.md) | “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]

“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”


In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.007: Rented, T0151.017: Dating Platform). | -| [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”

In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). | +| [I00064 Tinder nightmares: the promise and peril of political bots](../../generated_pages/incidents/I00064.md) | “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]

“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”


In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097.109: Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). | +| [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”

In this example attackers created an account on WhatsApp which impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). | | [I00069 Uncharmed: Untangling Iran's APT42 Operations](../../generated_pages/incidents/I00069.md) | “[Iranian state-sponsored cyber espionage actor] APT42 cloud operations attack lifecycle can be described in details as follows:

- “Social engineering schemes involving decoys and trust building, which includes masquerading as legitimate NGOs and conducting ongoing correspondence with the target, sometimes lasting several weeks.
- The threat actor masqueraded as well-known international organizations in the legal and NGO fields and sent emails from domains typosquatting the original NGO domains, for example aspenlnstitute[.]org.
- The Aspen Institute became aware of this spoofed domain and collaborated with industry partners, including blocking it in SafeBrowsing, thus protecting users of Google Chrome and additional browsers.
- To increase their credibility, APT42 impersonated high-ranking personnel working at the aforementioned organizations when creating the email personas.
- APT42 enhanced their campaign credibility by using decoy material inviting targets to legitimate and relevant events and conferences. In one instance, the decoy material was hosted on an attacker-controlled SharePoint folder, accessible only after the victim entered their credentials. Mandiant did not identify malicious elements in the files, suggesting they were used solely to gain the victim’s trust.”
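
The typosquatted domain quoted in the list above, aspenlnstitute[.]org, swaps a lowercase “L” for the “i” of the legitimate domain. A hedged sketch of how a defender might flag such confusable-character lookalikes; the mapping table and domain list are illustrative assumptions:

```python
# Map commonly confused characters onto a canonical form, then compare.
# Deliberately coarse: both sides are canonicalised, so it acts as a filter
# for human review rather than a definitive verdict.
CONFUSABLES = str.maketrans({"l": "i", "1": "i", "0": "o", "5": "s"})

def canonical(domain: str) -> str:
    return domain.lower().translate(CONFUSABLES)

def is_lookalike(candidate: str, legitimate: set) -> bool:
    # Suspicious if it is not a domain we own but collapses to the same
    # canonical form as one we do.
    return candidate not in legitimate and canonical(candidate) in {canonical(d) for d in legitimate}

print(is_lookalike("aspenlnstitute.org", {"aspeninstitute.org"}))  # True
```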


In this example APT42, an Iranian state-sponsored cyber espionage actor, created a domain impersonating the existing NGO The Aspen Institute (T0143.003: Impersonated Persona, T0097.207: NGO Persona). They increased the perceived legitimacy of the impersonation by also impersonating high-ranking employees of the NGO (T0097.100: Individual Persona, T0143.003: Impersonated Persona). | | [I00070 Eli Lilly Clarifies It’s Not Offering Free Insulin After Tweet From Fake Verified Account—As Chaos Unfolds On Twitter](../../generated_pages/incidents/I00070.md) | “Twitter Blue launched [November 2022], giving any users who pay $8 a month the ability to be verified on the site, a feature previously only available to public figures, government officials and journalists as a way to show they are who they claim to be.

“[A day after the launch], an account with the handle @EliLillyandCo labeled itself with the name “Eli Lilly and Company,” and by using the same logo as the company in its profile picture and with the verification checkmark, was indistinguishable from the real company (the picture has since been removed and the account has labeled itself as a parody profile).

The parody account tweeted “we are excited to announce insulin is free now.””


In this example an account impersonated the pharmaceutical company Eli Lilly (T0097.205: Business Persona, T0143.003: Impersonated Persona) by copying its name, profile picture (T0145.001: Copy Account Imagery), and paying for verification. | -| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | +| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | | [I00075 How Russia Meddles Abroad for Profit: Cash, Trolls and a Cult Leader](../../generated_pages/incidents/I00075.md) | “In the campaign’s final weeks, Pastor Mailhol said, the team of Russians made a request: Drop out of the race and support Mr. Rajoelina. He refused.

“The Russians made the same proposal to the history professor running for president, saying, “If you accept this deal you will have money” according to Ms. Rasamimanana, the professor’s campaign manager.

When the professor refused, she said, the Russians created a fake Facebook page that mimicked his official page and posted an announcement on it that he was supporting Mr. Rajoelina.”


In this example actors created online accounts styled to look like official pages to trick targets into thinking that the presidential candidate announced that they had dropped out of the election (T0097.110: Party Official Persona, T0143.003: Impersonated Persona). | | [I00076 Network of Social Media Accounts Impersonates U.S. Political Candidates, Leverages U.S. and Israeli Media in Support of Iranian Interests](../../generated_pages/incidents/I00076.md) | “Some Twitter accounts in the network [of inauthentic accounts attributed to Iran] impersonated Republican political candidates that ran for House of Representatives seats in the 2018 U.S. congressional midterms. These accounts appropriated the candidates’ photographs and, in some cases, plagiarized tweets from the real individuals’ accounts. Aside from impersonating real U.S. political candidates, the behavior and activity of these accounts resembled that of the others in the network.

“For example, the account @livengood_marla impersonated Marla Livengood, a 2018 candidate for California’s 9th Congressional District, using a photograph of Livengood and a campaign banner for its profile and background pictures. The account began tweeting on Sept. 24, 2018, with its first tweet plagiarizing one from Livengood’s official account earlier that month”

[...]

“In another example, the account @ButlerJineea impersonated Jineea Butler, a 2018 candidate for New York’s 13th Congressional District, using a photograph of Butler for its profile picture and incorporating her campaign slogans into its background picture, as well as claiming in its Twitter bio to be a “US House candidate, NY-13” and linking to Butler’s website, jineeabutlerforcongress[.]com.”


In this example actors impersonated existing political candidates (T0097.110: Member of Political Party Persona, T0143.003: Impersonated Persona), strengthening the impersonation by copying legitimate accounts’ imagery (T0145.001: Copy Account Imagery), and copying its previous posts (T0084.002: Plagiarise Content). | | [I00082 Meta’s November 2021 Adversarial Threat Report ](../../generated_pages/incidents/I00082.md) | “[Meta] removed a network of accounts in Vietnam for violating our Inauthentic Behavior policy against mass reporting. They coordinated the targeting of activists and other people who publicly criticized the Vietnamese government and used false reports of various violations in an attempt to have these users removed from our platform. The people behind this activity relied primarily on authentic and duplicate accounts to submit hundreds — in some cases, thousands — of complaints against their targets through our abuse reporting flows.

“Many operators also maintained fake accounts — some of which were detected and disabled by our automated systems — to pose as their targets so they could then report the legitimate accounts as fake. They would frequently change the gender and name of their fake accounts to resemble the target individual. Among the most common claims in this misleading reporting activity were complaints of impersonation, and to a much lesser extent inauthenticity. The network also advertised abusive services in their bios and constantly evolved their tactics in an attempt to evade detection.“


In this example actors repurposed their accounts to impersonate targeted activists (T0097.103: Activist Persona, T0143.003: Impersonated Persona) in order to falsely report the activists’ legitimate accounts as impersonations (T0124.001: Report Non-Violative Opposing Content). | | [I00087 Challenging Truth and Trust: A Global Inventory of Organized Social Media Manipulation](../../generated_pages/incidents/I00087.md) | “Another actor operating in China is the American-based company Devumi. Most of the Twitter accounts managed by Devumi resemble real people, and some are even associated with a kind of large-scale social identity theft. At least 55,000 of the accounts use the names, profile pictures, hometowns and other personal details of real Twitter users, including minors, according to The New York Times (Confessore et al., 2018)).”

In this example accounts impersonated real locals while spreading operation narratives (T0143.003: Impersonated Persona, T0097.101: Local Persona). The impersonation included stealing the legitimate accounts’ profile pictures (T0145.001: Copy Account Imagery). | | [I00094 A glimpse inside a Chinese influence campaign: How bogus news websites blur the line between true and false](../../generated_pages/incidents/I00094.md) | Researchers identified websites managed by a Chinese marketing firm which presented themselves as news organisations.

“On its official website, the Chinese marketing firm boasted that they were in contact with news organizations across the globe, including one in South Korea called the “Chungcheng Times.” According to the joint team, this outlet is a fictional news organization created by the offending company. The Chinese company sought to disguise the site’s true identity and purpose by altering the name attached to it by one character—making it very closely resemble the name of a legitimate outlet operating out of Chungchengbuk-do.

“The marketing firm also established a news organization under the Korean name “Gyeonggido Daily,” which closely resembles legitimate news outlets operating out of Gyeonggi province such as “Gyeonggi Daily,” “Daily Gyeonggi Newspaper,” and “Gyeonggi N Daily.” One of the fake news sites was named “Incheon Focus,” a title that could be easily mistaken for the legitimate local news outlet, “Focus Incheon.” Furthermore, the Chinese marketing company operated two fake news sites with names identical to two separate local news organizations, one of which ceased operations in December 2022.

“In total, fifteen out of eighteen Chinese fake news sites incorporated the correct names of real regions in their fake company names. “If the operators had created fake news sites similar to major news organizations based in Seoul, however, the intended deception would have easily been uncovered,” explained Song Tae-eun, an assistant professor in the Department of National Security & Unification Studies at the Korea National Diplomatic Academy, to The Readable. “There is also the possibility that they are using the regional areas as an attempt to form ties with the local community; that being the government, the private sector, and religious communities.””


The firm styled their news site to resemble existing local news outlets in their target region (T0097.201: Local Institution Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona). | | [I00107 The Lies Russia Tells Itself](../../generated_pages/incidents/I00107.md) | The Moscow firm Social Design Agency (SDA) has been attributed as being behind a Russian disinformation project known as Doppelganger:

The SDA’s deception work first surfaced in 2022, likely almost immediately after Doppelganger got off the ground. In April of that year, Meta, the parent company of Facebook and Instagram, disclosed in a quarterly report that it had removed from its platforms “a network of about 200 accounts operated from Russia.” By August 2022, German investigative journalists revealed that they had discovered forgeries of about 30 news sites, including many of the country’s biggest media outlets—Frankfurter Allgemeine, Der Spiegel, and Bild—but also Britain’s Daily Mail and France’s 20 Minutes. The sites had deceptive URLs such as www-dailymail-co-uk.dailymail.top.
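
The quoted URL illustrates the Doppelganger pattern: the real outlet’s domain is encoded, hyphen for dot, as a subdomain of an unrelated registrable domain. A rough sketch of that decoding check; the protected-domain list is an illustrative assumption:

```python
PROTECTED = {"dailymail.co.uk", "spiegel.de", "20minutes.fr"}  # illustrative list

def doppelganger_suspect(hostname: str) -> bool:
    # "www-dailymail-co-uk.dailymail.top" -> leftmost label "www-dailymail-co-uk"
    # -> rewrite hyphens as dots -> "www.dailymail.co.uk" -> matches a protected domain.
    decoded = hostname.split(".")[0].replace("-", ".")
    return any(decoded == p or decoded.endswith("." + p) for p in PROTECTED)

print(doppelganger_suspect("www-dailymail-co-uk.dailymail.top"))  # True
```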


As part of the SDA’s work, they created many websites which impersonated existing media outlets. Sites used domain impersonation tactics to increase perceived legitimacy of their impersonations (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.003: Website Hosting Platform, T0149.003: Lookalike Domain). | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.
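
One way to make the reported tell — an extra separator in an otherwise familiar handle — mechanical is to strip separators before comparing against the genuine handle. A sketch under stated assumptions; both handles in the example are hypothetical:

```python
def is_lookalike_handle(candidate: str, official: str) -> bool:
    # Collapse separators and case so "support-acme" collides with "supportacme".
    def fold(handle: str) -> str:
        return handle.lstrip("@").replace("-", "").replace("_", "").replace(".", "").lower()
    same_folded = fold(candidate) == fold(official)
    literally_same = candidate.lstrip("@").lower() == official.lstrip("@").lower()
    return same_folded and not literally_same

print(is_lookalike_handle("@Booking-Com_Help", "@BookingComHelp"))  # hypothetical handles -> True
```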


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a lookalike domain which impersonated an existing Israeli news organisation, and used it to pose as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | | [I00127 Iranian APTs Dress Up as Hacktivists for Disruption, Influence Ops](../../generated_pages/incidents/I00127.md) | Iranian state-backed advanced persistent threat (APT) groups have been masquerading as hacktivists, claiming attacks against Israeli critical infrastructure and air defense systems.

[...]

What's clearer are the benefits of the model itself: creating a layer of plausible deniability for the state, and the impression among the public that their attacks are grassroots-inspired. While this deniability has always been a key driver with state-sponsored cyberattacks, researchers characterized this instance as noteworthy for the effort behind the charade.

"We've seen a lot of hacktivist activity that seems to be nation-states trying to have that 'deniable' capability," Adam Meyers, CrowdStrike senior vice president for counter adversary operations said in a press conference this week. "And so these groups continue to maintain activity, moving from what was traditionally website defacements and DDoS attacks, into a lot of hack and leak operations."

To sell the persona, faketivists like to adopt the aesthetic, rhetoric, tactics, techniques, and procedures (TTPs), and sometimes the actual names and iconography associated with legitimate hacktivist outfits. Keen eyes will spot that they typically arise just after major geopolitical events, without an established history of activity, in alignment with the interests of their government sponsors.

Oftentimes, it's difficult to separate the faketivists from the hacktivists, as each might promote and support the activities of the other.


In this example analysts from CrowdStrike assert that hacker groups took on the persona of hacktivists to disguise the state-backed nature of their cyber attack campaign (T0097.104: Hacktivist Persona). At times state-backed actors will impersonate existing hacktivist organisations (T0097.104: Hacktivist Persona, T0143.003: Impersonated Persona). | -| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example, a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over the accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to the actor’s wallet (T0148.009: Cryptocurrency Wallet). | diff --git a/generated_pages/techniques/T0146.001.md b/generated_pages/techniques/T0146.001.md index 9ac7799..371023c 100644 --- a/generated_pages/techniques/T0146.001.md +++ b/generated_pages/techniques/T0146.001.md @@ -1,4 +1,4 @@ -# Technique T0146.001: Free Account +# Technique T0146.001: Free Account Asset * **Summary**: Many online platforms allow users to create free accounts on their platform. A Free Account is an Account which does not require payment at account creation and is not subscribed to paid platform features. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00121 Operation Overload: how pro-Russian actors flood newsrooms with fake content and seek to divert their efforts](../../generated_pages/incidents/I00121.md) | The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | +| [I00121 Operation Overload: how pro-Russian actors flood newsrooms with fake content and seek to divert their efforts](../../generated_pages/incidents/I00121.md) | The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | diff --git a/generated_pages/techniques/T0146.002.md b/generated_pages/techniques/T0146.002.md index 76b756b..20e6dcb 100644 --- a/generated_pages/techniques/T0146.002.md +++ b/generated_pages/techniques/T0146.002.md @@ -1,4 +1,4 @@ -# Technique T0146.002: Paid Account +# Technique T0146.002: Paid Account Asset * **Summary**: Some online platforms afford accounts extra features, or other benefits, if the user pays a fee. For example, as of September 2024, content posted by a Paid Account on X (previously Twitter) is prioritised in the platform’s algorithm. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example, a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | diff --git a/generated_pages/techniques/T0146.003.md b/generated_pages/techniques/T0146.003.md index c417290..d7927e7 100644 --- a/generated_pages/techniques/T0146.003.md +++ b/generated_pages/techniques/T0146.003.md @@ -1,4 +1,4 @@ -# Technique T0146.003: Verified Account +# Technique T0146.003: Verified Account Asset * **Summary**: Some online platforms apply badges of verification to accounts which meet certain criteria.

On some platforms (such as dating apps) a verification badge signifies that the account has passed the platform’s identity verification checks. On some platforms (such as X (previously Twitter)) a verification badge signifies that an account has paid for the platform’s service. @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T00145.006: Attractive Person Account Imagery). | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it

In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which they repurposed with updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example, a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | During a leaders’ debate ahead of the 2019 UK election, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example, an existing verified Twitter account was repurposed to inauthentically present itself as a fact-checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | diff --git a/generated_pages/techniques/T0146.004.md b/generated_pages/techniques/T0146.004.md index 3f7b8ff..bf3628a 100644 --- a/generated_pages/techniques/T0146.004.md +++ b/generated_pages/techniques/T0146.004.md @@ -1,4 +1,4 @@ -# Technique T0146.004: Administrator Account +# Technique T0146.004: Administrator Account Asset * **Summary**: Some accounts have special privileges over, or control of, a Digital Community Hosting Asset; for example, the Admin of a Facebook Page or a Moderator of a Subreddit. @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | -| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that “the United States is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example, a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over the accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to the actor’s wallet (T0148.009: Cryptocurrency Wallet). | diff --git a/generated_pages/techniques/T0146.005.md b/generated_pages/techniques/T0146.005.md index 1572d0c..f0e90e3 100644 --- a/generated_pages/techniques/T0146.005.md +++ b/generated_pages/techniques/T0146.005.md @@ -7,8 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example, a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | diff --git a/generated_pages/techniques/T0146.006.md b/generated_pages/techniques/T0146.006.md index 693cfea..d8303cc 100644 --- a/generated_pages/techniques/T0146.006.md +++ b/generated_pages/techniques/T0146.006.md @@ -8,7 +8,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00100 Why ThisPersonDoesNotExist (and its copycats) need to be restricted](../../generated_pages/incidents/I00100.md) | You might have heard about the recent viral sensation, ThisPersonDoesNotExist.com, a website, launched two weeks ago, that uses Nvidia’s publicly available artificial intelligence technology to draw an invented, photo-realistic human being with each refresh. The tech is impressive and artistically evocative. It’s also irresponsible and needs to be restricted immediately.

[...]

Prior to this technology, scammers faced three major risks when using fake photos. Each of these risks had the potential to put them out business, or in jail.

Risk #1: Someone recognizes the photo. While the odds of this are long-shot, it does happen.

Risk #2: Someone reverse image searches the photo with a service like TinEye or Google Image Search and finds that it’s been posted elsewhere. Reverse image search is one of the top anti-fraud measures recommended by consumer protection advocates.

Risk #3: If the crime is successful, law enforcement uses the fake photo to figure out the scammer’s identity after the fact. Perhaps the scammer used an old classmate’s photo. Perhaps their personal account follows the Instagram member they pilfered. And so on: people make mistakes.

The problem with AI-generated photos is that they carry none of these risks. No one will recognize a human who’s never existed before. Google Image Search will return 0 results, possibly instilling a false sense of security in the searcher. And AI-generated photos don’t give law enforcement much to work with.


ThisPersonDoesNotExist is an online platform which, when visited, produces AI-generated images of people’s faces (T0146.006: Open Access Platform, T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms): a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform), and a manifesto they had written, hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file-sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | diff --git a/generated_pages/techniques/T0146.007.md b/generated_pages/techniques/T0146.007.md index 6ed03c7..06a983c 100644 --- a/generated_pages/techniques/T0146.007.md +++ b/generated_pages/techniques/T0146.007.md @@ -1,4 +1,4 @@ -# Technique T0146.007: Automated Account +# Technique T0146.007: Automated Account Asset * **Summary**: An Automated Account is an account which displays automated behaviour, such as republishing or liking other accounts’ content, or publishing their own content. diff --git a/generated_pages/techniques/T0146.md b/generated_pages/techniques/T0146.md index 2ab56ca..bde9c45 100644 --- a/generated_pages/techniques/T0146.md +++ b/generated_pages/techniques/T0146.md @@ -1,4 +1,4 @@ -# Technique T0146: Account +# Technique T0146: Account Asset * **Summary**: An Account is a user-specific profile that allows access to the features and services of an online platform, typically requiring a username and password for authentication. @@ -7,12 +7,12 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | -| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: Develop AI-Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU DisinfoLab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-play multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, whose handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | diff --git a/generated_pages/techniques/T0147.001.md b/generated_pages/techniques/T0147.001.md index 910b739..21a302c 100644 --- a/generated_pages/techniques/T0147.001.md +++ b/generated_pages/techniques/T0147.001.md @@ -1,4 +1,4 @@ -# Technique T0147.001: Game +# Technique T0147.001: Game Asset * **Summary**: A Game is Software which has been designed for interactive entertainment, where users take on challenges set by the game’s designers.

While Online Game Platforms allow people to play with each other, Games are designed for single player experiences. @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00098 Gaming The System: How Extremists Exploit Gaming Sites And What Can Be Done To Counter Them](../../generated_pages/incidents/I00098.md) | This report looks at how extremists exploit games and gaming adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. | -| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). | +| [I00098 Gaming The System: How Extremists Exploit Gaming Sites And What Can Be Done To Counter Them](../../generated_pages/incidents/I00098.md) | This report looks at how extremists exploit games and gaming adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. | +| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). | diff --git a/generated_pages/techniques/T0147.002.md b/generated_pages/techniques/T0147.002.md index b2fc42c..5780150 100644 --- a/generated_pages/techniques/T0147.002.md +++ b/generated_pages/techniques/T0147.002.md @@ -1,4 +1,4 @@ -# Technique T0147.002: Game Mod +# Technique T0147.002: Game Mod Asset * **Summary**: A Game Mod is a modification which can be applied to a Game or Multiplayer Online Game to add new content or functionality to the game.

Users can Modify Games to introduce new content to the game. Modified Games can be distributed on Software Delivery Platforms such as Steam or can be distributed within the Game or Multiplayer Online Game. @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00098 Gaming The System: How Extremists Exploit Gaming Sites And What Can Be Done To Counter Them](../../generated_pages/incidents/I00098.md) | This report looks at how extremists exploit games and gaming adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. | -| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod). | +| [I00098 Gaming The System: How Extremists Exploit Gaming Sites And What Can Be Done To Counter Them](../../generated_pages/incidents/I00098.md) | This report looks at how extremists exploit games and gaming adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. | +| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod Asset). | diff --git a/generated_pages/techniques/T0147.003.md b/generated_pages/techniques/T0147.003.md index d3b732b..c02584d 100644 --- a/generated_pages/techniques/T0147.003.md +++ b/generated_pages/techniques/T0147.003.md @@ -1,4 +1,4 @@ -# Technique T0147.003: Malware +# Technique T0147.003: Malware Asset * **Summary**: Malware is Software which has been designed to cause harm or facilitate malicious behaviour on electronic devices.

DISARM recommends using the [MITRE ATT&CK Framework](https://attack.mitre.org/) to document malware types and their usage. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything overtly malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, and used it to impersonate a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything overtly malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, and used it to impersonate a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | diff --git a/generated_pages/techniques/T0147.004.md index 4f14a38..aa78e9a 100644 --- a/generated_pages/techniques/T0147.004.md +++ b/generated_pages/techniques/T0147.004.md @@ -1,4 +1,4 @@ -# Technique T0147.004: Mobile App +# Technique T0147.004: Mobile App Asset * **Summary**: A Mobile App is an application which has been designed to run on mobile operating systems, such as Android or iOS.

Mobile Apps can enable access to online platforms (e.g. Facebook’s mobile app) or can provide software which users can run offline on their device. diff --git a/generated_pages/techniques/T0147.md index 087713c..a796711 100644 --- a/generated_pages/techniques/T0147.md +++ b/generated_pages/techniques/T0147.md @@ -1,4 +1,4 @@ -# Technique T0147: Software +# Technique T0147: Software Asset * **Summary**: Software is a program developed to run on computers or devices that helps users achieve specific goals, such as improving productivity, automating tasks, or having fun. diff --git a/generated_pages/techniques/T0148.001.md index 0e3856c..7f8c6f3 100644 --- a/generated_pages/techniques/T0148.001.md +++ b/generated_pages/techniques/T0148.001.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into how these operations were run:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into how these operations were run:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | diff --git a/generated_pages/techniques/T0148.002.md index 57899e9..10c21b3 100644 --- a/generated_pages/techniques/T0148.002.md +++ b/generated_pages/techniques/T0148.002.md @@ -1,4 +1,4 @@ -# Technique T0148.002: Bank Account +# Technique T0148.002: Bank Account Asset * **Summary**: A Bank Account is a financial account that allows individuals or organisations to store, manage, and access their money, typically for saving, spending, or investment purposes. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into how these operations were run:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into how these operations were run:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | diff --git a/generated_pages/techniques/T0148.003.md b/generated_pages/techniques/T0148.003.md index d492453..3798569 100644 --- a/generated_pages/techniques/T0148.003.md +++ b/generated_pages/techniques/T0148.003.md @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [I00112 Patreon allows disinformation and conspiracies to be monetised in Spain](../../generated_pages/incidents/I00112.md) | In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by EU DisinfoLab in the mentioned previous report.

Some creators display links that redirect users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.

In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [I00112 Patreon allows disinformation and conspiracies to be monetised in Spain](../../generated_pages/incidents/I00112.md) | In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.

Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). | diff --git a/generated_pages/techniques/T0148.004.md b/generated_pages/techniques/T0148.004.md index 109f3be..34184be 100644 --- a/generated_pages/techniques/T0148.004.md +++ b/generated_pages/techniques/T0148.004.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform)., This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i>

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform)., This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | diff --git a/generated_pages/techniques/T0148.006.md b/generated_pages/techniques/T0148.006.md index 4b2f558..a597e50 100644 --- a/generated_pages/techniques/T0148.006.md +++ b/generated_pages/techniques/T0148.006.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | diff --git a/generated_pages/techniques/T0148.007.md b/generated_pages/techniques/T0148.007.md index 5e994df..95e978c 100644 --- a/generated_pages/techniques/T0148.007.md +++ b/generated_pages/techniques/T0148.007.md @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had previously posted AI-generated nature imagery, changed to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | diff --git a/generated_pages/techniques/T0148.009.md b/generated_pages/techniques/T0148.009.md index 9ca4da1..9c0ca6c 100644 --- a/generated_pages/techniques/T0148.009.md +++ b/generated_pages/techniques/T0148.009.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | diff --git a/generated_pages/techniques/T0149.001.md b/generated_pages/techniques/T0149.001.md index a05c93a..5e5ee9b 100644 --- a/generated_pages/techniques/T0149.001.md +++ b/generated_pages/techniques/T0149.001.md @@ -1,13 +1,13 @@ -# Technique T0149.001: Domain +# Technique T0149.001: Domain Asset -* **Summary**: A Domain is a web address (such as “www.google.com”), used to navigate to Websites on the internet.

Domains differ from Websites in that Websites are considered to be developed web pages which host content, whereas Domains do not necessarily host public-facing web content.

A threat actor may register a new domain to bypass the old domain being blocked. +* **Summary**: A Domain is a web address (such as “google[.]com”), used to navigate to Websites on the internet.

Domains differ from Websites in that Websites are considered to be developed web pages which host content, whereas Domains do not necessarily host public-facing web content.

A threat actor may register a new domain to bypass the old domain being blocked. * **Belongs to tactic stage**: TA06 | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i>

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).
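The AdSense-tag pivot quoted above is a standard attribution move: an advertising publisher ID found in the source of several sites usually indicates common ownership. A minimal sketch of the check using only the Python standard library; the candidate domain list and the ID pattern are illustrative assumptions, not data from the report:

```python
import re
import urllib.request
from collections import defaultdict

# Hypothetical candidates; in practice these come from crawl or passive DNS data.
CANDIDATES = ["suavelos.eu", "alabastro.eu", "arpac.eu"]

# AdSense publisher IDs appear in page source as "ca-pub-" plus a long digit run.
ADSENSE_ID = re.compile(rb"ca-pub-\d{10,20}")

def publisher_ids(domain: str) -> set[bytes]:
    """Fetch the homepage and extract any AdSense publisher IDs it contains."""
    try:
        with urllib.request.urlopen(f"https://{domain}/", timeout=10) as resp:
            return set(ADSENSE_ID.findall(resp.read()))
    except OSError:
        return set()

shared = defaultdict(list)
for domain in CANDIDATES:
    for pub_id in publisher_ids(domain):
        shared[pub_id].append(domain)

for pub_id, domains in shared.items():
    if len(domains) > 1:  # the same publisher ID on multiple domains
        print(pub_id.decode(), "->", ", ".join(domains))
```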

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | diff --git a/generated_pages/techniques/T0149.002.md b/generated_pages/techniques/T0149.002.md index 88b0dad..388a9a0 100644 --- a/generated_pages/techniques/T0149.002.md +++ b/generated_pages/techniques/T0149.002.md @@ -1,13 +1,13 @@ -# Technique T0149.002: Email Domain +# Technique T0149.002: Email Domain Asset -* **Summary**: An Email Domain is a Domain (such as “meta.com”) which has the ability to send emails.

Any Domain which has an MX (Mail Exchange) record and configured SMTP (Simple Mail Transfer Protocol) settings can send and receive emails, and is therefore an Email Domain. +* **Summary**: An Email Domain is a Domain (such as “meta[.]com”) which has the ability to send emails (e.g. from an @meta[.]com address).

Any Domain which has an MX (Mail Exchange) record and configured SMTP (Simple Mail Transfer Protocol) settings can send and receive emails, and is therefore an Email Domain. * **Belongs to tactic stage**: TA06 | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.
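The MX-record property described above is mechanically checkable, which is how an analyst can confirm that a Domain is also an Email Domain. A minimal sketch, assuming the third-party dnspython package; the domain argument is a placeholder:

```python
import dns.resolver  # third-party: pip install dnspython

def is_email_domain(domain: str) -> bool:
    """True if the domain publishes at least one MX record,
    i.e. it is configured to receive (and typically send) email."""
    try:
        return len(dns.resolver.resolve(domain, "MX")) > 0
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
        return False

print(is_email_domain("example.com"))
```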

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, posing as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | diff --git a/generated_pages/techniques/T0149.003.md b/generated_pages/techniques/T0149.003.md index 756e499..13ee788 100644 --- a/generated_pages/techniques/T0149.003.md +++ b/generated_pages/techniques/T0149.003.md @@ -8,7 +8,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00107 The Lies Russia Tells Itself](../../generated_pages/incidents/I00107.md) | The Moscow firm Social Design Agency (SDA) has been attributed as being behind a Russian disinformation project known as Doppelganger:

The SDA’s deception work first surfaced in 2022, likely almost immediately after Doppelganger got off the ground. In April of that year, Meta, the parent company of Facebook and Instagram, disclosed in a quarterly report that it had removed from its platforms “a network of about 200 accounts operated from Russia.” By August 2022, German investigative journalists revealed that they had discovered forgeries of about 30 news sites, including many of the country’s biggest media outlets—Frankfurter Allgemeine, Der Spiegel, and Bild—but also Britain’s Daily Mail and France’s 20 Minutes. The sites had deceptive URLs such as www-dailymail-co-uk.dailymail.top.


As part of the SDA’s work, they created many websites which impersonated existing media outlets. Sites used domain impersonation tactics to increase perceived legitimacy of their impersonations (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.003: Website Hosting Platform, T0149.003: Lookalike Domain). | -| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.
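Lookalike domains of the kind quoted above (e.g. www-dailymail-co-uk.dailymail.top) embed the impersonated domain with its dots flattened into dashes, which makes them detectable with a simple heuristic. A minimal sketch of that one signal; a real detection pipeline would combine many more:

```python
def embeds_flattened_target(candidate: str, target: str) -> bool:
    """Flag candidates like 'www-dailymail-co-uk.dailymail.top' which contain
    'dailymail.co.uk' with its dots replaced by dashes, while not actually
    being the real domain or one of its subdomains."""
    flattened = target.replace(".", "-")
    is_real = candidate == target or candidate.endswith("." + target)
    return flattened in candidate and not is_real

print(embeds_flattened_target("www-dailymail-co-uk.dailymail.top", "dailymail.co.uk"))  # True
print(embeds_flattened_target("news.dailymail.co.uk", "dailymail.co.uk"))               # False
```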

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). | +| [I00126 Charming Kitten Updates POWERSTAR with an InterPlanetary Twist](../../generated_pages/incidents/I00126.md) | The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.

In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.


In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation, posing as a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). | diff --git a/generated_pages/techniques/T0149.004.md b/generated_pages/techniques/T0149.004.md index 8485ea3..d378b7f 100644 --- a/generated_pages/techniques/T0149.004.md +++ b/generated_pages/techniques/T0149.004.md @@ -1,4 +1,4 @@ -# Technique T0149.004: Redirecting Domain +# Technique T0149.004: Redirecting Domain Asset * **Summary**: A Redirecting Domain is a Domain which has been configured to redirect users to another Domain when visited. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00072 Behind the Dutch Terror Threat Video: The St. Petersburg "Troll Factory" Connection](../../generated_pages/incidents/I00072.md) | “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). | +| [I00072 Behind the Dutch Terror Threat Video: The St. Petersburg "Troll Factory" Connection](../../generated_pages/incidents/I00072.md) | “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).
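A reconfiguration like geopolika[.]ru’s can be verified by walking the HTTP redirect chain from the suspect domain. A minimal sketch, assuming the third-party requests package; the URL is a placeholder:

```python
import requests  # third-party: pip install requests

def redirect_chain(url: str) -> list[str]:
    """Return every hop an HTTP GET passes through, ending at the final URL."""
    resp = requests.get(url, timeout=10, allow_redirects=True)
    return [r.url for r in resp.history] + [resp.url]

# A Redirecting Domain shows up as a chain that ends on a different domain:
for hop in redirect_chain("http://example.com/"):
    print(hop)
```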

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). | diff --git a/generated_pages/techniques/T0149.005.md b/generated_pages/techniques/T0149.005.md index 0a114d1..bd567e2 100644 --- a/generated_pages/techniques/T0149.005.md +++ b/generated_pages/techniques/T0149.005.md @@ -1,4 +1,4 @@ -# Technique T0149.005: Server +# Technique T0149.005: Server Asset * **Summary**: A Server is a computer which provides resources, services, or data to other computers over a network. There are different types of servers, such as web servers (which serve web pages and applications to users), database servers (which manage and provide access to databases), and file servers (which store and share files across a network). @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i>

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | +| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: Develop AI-Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | diff --git a/generated_pages/techniques/T0149.006.md b/generated_pages/techniques/T0149.006.md index 5f0bdf0..d2b13a7 100644 --- a/generated_pages/techniques/T0149.006.md +++ b/generated_pages/techniques/T0149.006.md @@ -1,4 +1,4 @@ -# Technique T0149.006: IP Address +# Technique T0149.006: IP Address Asset * **Summary**: An IP Address is a unique numerical label assigned to each device connected to a computer network that uses the Internet Protocol for communication. IP addresses are commonly a part of any online infrastructure.

IP addresses can be in IPv4 dotted decimal (x.x.x.x) or IPv6 colon-separated hexadecimal (y:y:y:y:y:y:y:y) formats. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i>

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.
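Pivoting on a shared Google AdSense publisher ID (the tag referenced above) is a common attribution step. A hedged sketch: the fetching is simplified, the domain list is illustrative, and the pages may have changed since the report was written.

```python
# Sketch: map domains by the AdSense "ca-pub-..." publisher IDs found in
# their HTML; two domains sharing an ID suggests common control.
import re
import urllib.request
from collections import defaultdict

ADSENSE_ID = re.compile(r"ca-pub-\d{10,20}")  # publisher IDs are long digit runs

def publisher_ids(url: str) -> set[str]:
    with urllib.request.urlopen(url, timeout=10) as resp:
        html = resp.read().decode("utf-8", errors="replace")
    return set(ADSENSE_ID.findall(html))

domains_by_publisher = defaultdict(set)
for domain in ["suavelos.eu", "alabastro.eu", "arpac.eu"]:  # illustrative
    try:
        for pub_id in publisher_ids(f"http://{domain}"):
            domains_by_publisher[pub_id].add(domain)
    except OSError:
        continue  # site unreachable or removed

for pub_id, domains in domains_by_publisher.items():
    if len(domains) > 1:
        print(f"{pub_id} appears on: {', '.join(sorted(domains))}")
```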

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | diff --git a/generated_pages/techniques/T0149.007.md b/generated_pages/techniques/T0149.007.md index 0fc0211..df549cd 100644 --- a/generated_pages/techniques/T0149.007.md +++ b/generated_pages/techniques/T0149.007.md @@ -1,4 +1,4 @@ -# Technique T0149.007: VPN +# Technique T0149.007: VPN Asset * **Summary**: A VPN (Virtual Private Network) is a service which creates secure, encrypted connections over the internet, allowing users to transmit data safely and access network resources remotely. It masks IP Addresses, enhancing privacy and security by preventing unauthorised access and tracking. VPNs are commonly used for protecting sensitive information, bypassing geographic restrictions, and maintaining online anonymity.

VPNs can also allow a threat actor to pose as if they are located in one country while in reality being based in another. By doing so, they can try to either mis-attribute their activities to another actor or better hide their own identity. diff --git a/generated_pages/techniques/T0149.008.md b/generated_pages/techniques/T0149.008.md index 7755414..3eb3261 100644 --- a/generated_pages/techniques/T0149.008.md +++ b/generated_pages/techniques/T0149.008.md @@ -1,4 +1,4 @@ -# Technique T0149.008: Proxy IP Address +# Technique T0149.008: Proxy IP Address Asset * **Summary**: A Proxy IP Address allows a threat actor to mask their real IP Address by putting a layer between them and the online content they’re connecting with.
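One way analysts operationalise both the VPN and proxy cases is to check whether an observed IP falls inside ranges known to belong to VPN, proxy, or datacenter providers. A minimal sketch, assuming a hypothetical list of such ranges; real workflows draw on commercial or open VPN/ASN datasets.

```python
# Sketch: flag source IPs that sit inside known masking-infrastructure
# ranges. The CIDR list below is a stand-in, not a real dataset.
import ipaddress

KNOWN_MASKING_RANGES = [  # illustrative only
    ipaddress.ip_network("185.220.100.0/22"),
    ipaddress.ip_network("2001:db8::/32"),
]

def looks_masked(ip_string: str) -> bool:
    ip = ipaddress.ip_address(ip_string)
    return any(ip in net for net in KNOWN_MASKING_RANGES)

print(looks_masked("185.220.101.5"))  # True: inside a listed range
print(looks_masked("94.23.253.173"))  # False: not in the stand-in list
```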

Proxy IP Addresses can hide the connection between the threat actor and their online infrastructure. diff --git a/generated_pages/techniques/T0150.001.md b/generated_pages/techniques/T0150.001.md index c591d69..b436cc4 100644 --- a/generated_pages/techniques/T0150.001.md +++ b/generated_pages/techniques/T0150.001.md @@ -1,4 +1,4 @@ -# Technique T0150.001: Newly Created +# Technique T0150.001: Newly Created Asset * **Summary**: A Newly Created Asset is an asset which has been created and used for the first time in a documented potential incident.

For example, analysts who can identify a recent creation date for Accounts participating in the spread of a new narrative can assert that these are Newly Created Assets.
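A minimal sketch of this triage, which also applies the Dormant distinction noted below; the 30-day and one-year thresholds are illustrative analyst judgement calls, not DISARM-defined values.

```python
# Sketch: classify an account by creation date and pre-incident activity.
from datetime import date, timedelta

NEW_WINDOW = timedelta(days=30)     # "recently created" cut-off (assumed)
DORMANCY_GAP = timedelta(days=365)  # "extended period" of silence (assumed)

def classify(created: date, last_prior_activity: date | None,
             incident_start: date) -> str:
    if incident_start - created <= NEW_WINDOW:
        return "Newly Created Asset (T0150.001)"
    if (last_prior_activity is None
            or incident_start - last_prior_activity >= DORMANCY_GAP):
        return "possible Dormant Asset (T0150.002)"
    return "Pre-Existing Asset (T0150.003); check for repurposing"

print(classify(date(2023, 7, 1), None, date(2023, 7, 20)))              # Newly Created
print(classify(date(2015, 1, 1), date(2020, 2, 1), date(2023, 7, 20)))  # Dormant
```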

Analysts should use Dormant if the asset was created and lay dormant for an extended period of time before activity. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.
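The two indicators in this account, a near-match handle and a recent join date, lend themselves to simple automated checks. A sketch of the handle comparison using Python's standard-library difflib; the handles are illustrative, and production systems would also normalise homoglyphs (T0146.005: Lookalike Account ID).

```python
# Sketch: score an observed handle against the brand handle it claims to
# be, catching hyphen-insertion lookalikes like the one described above.
from difflib import SequenceMatcher

def lookalike_score(observed: str, legitimate: str) -> float:
    return SequenceMatcher(None, observed.lower(), legitimate.lower()).ratio()

observed = "Booking-com"   # hypothetical scam handle with an inserted hyphen
legitimate = "bookingcom"  # illustrative stand-in for the real brand handle

score = lookalike_score(observed, legitimate)
if 0.8 <= score < 1.0:
    print(f"{observed!r} is a near-match for {legitimate!r} (score {score:.2f})")
```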


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | diff --git a/generated_pages/techniques/T0150.002.md index 3b2cfa0..0be47a9 100644 --- a/generated_pages/techniques/T0150.002.md +++ b/generated_pages/techniques/T0150.002.md @@ -1,4 +1,4 @@ -# Technique T0150.002: Dormant +# Technique T0150.002: Dormant Asset * **Summary**: A Dormant Asset is an asset which was inactive for an extended period before being used in a documented potential incident. diff --git a/generated_pages/techniques/T0150.003.md index 45afca8..5e3d4c7 100644 --- a/generated_pages/techniques/T0150.003.md +++ b/generated_pages/techniques/T0150.003.md @@ -1,4 +1,4 @@ -# Technique T0150.003: Pre-Existing +# Technique T0150.003: Pre-Existing Asset * **Summary**: Pre-Existing Assets are assets which existed before the observed incident which have not been Repurposed; i.e. they are still being used for their original purpose.

An example could be an Account which presented itself with a Journalist Persona prior to and during the observed potential incident. @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | diff --git a/generated_pages/techniques/T0150.004.md b/generated_pages/techniques/T0150.004.md index f510d09..ba7c720 100644 --- a/generated_pages/techniques/T0150.004.md +++ b/generated_pages/techniques/T0150.004.md @@ -1,4 +1,4 @@ -# Technique T0150.004: Repurposed +# Technique T0150.004: Repurposed Asset * **Summary**: Repurposed Assets are assets which have been identified as being used previously, but are now being used for different purposes, or have new Presented Personas.

Actors have been documented compromising assets, and then repurposing them to present Inauthentic Personas as part of their operations. @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00072 Behind the Dutch Terror Threat Video: The St. Petersburg "Troll Factory" Connection](../../generated_pages/incidents/I00072.md) | “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US Department of the Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). | -| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [I00072 Behind the Dutch Terror Threat Video: The St. Petersburg "Troll Factory" Connection](../../generated_pages/incidents/I00072.md) | “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example a domain managed by an actor previously sanctioned by the US Department of the Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).
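Recording where a suspected Redirecting Domain Asset lands can be done at the HTTP level, since urllib follows HTTP redirects by default. A minimal sketch; the starting URL is a stand-in (the investigated domain is written defanged above), and meta-refresh or JavaScript redirects would need a headless browser instead.

```python
# Sketch: compare the requested URL with the final URL after redirects
# to document a redirecting domain (T0149.004).
import urllib.request

def final_destination(url: str) -> str:
    req = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
    with urllib.request.urlopen(req, timeout=10) as resp:
        return resp.geturl()  # URL after any HTTP-level redirects

start = "http://example.com"  # stand-in for the investigated domain
landing = final_destination(start)
if landing.rstrip("/") != start.rstrip("/"):
    print(f"{start} redirects to {landing}")
```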

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). | +| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | diff --git a/generated_pages/techniques/T0150.005.md b/generated_pages/techniques/T0150.005.md index 1cd3374..3ad962c 100644 --- a/generated_pages/techniques/T0150.005.md +++ b/generated_pages/techniques/T0150.005.md @@ -1,4 +1,4 @@ -# Technique T0150.005: Compromised +# Technique T0150.005: Compromised Asset * **Summary**: A Compromised Asset is an asset which was originally created or belonged to another person or organisation, but which an actor has gained access to without their consent.

See also MITRE ATT&CK T1078: Valid Accounts. @@ -7,10 +7,10 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00066 The online war between Qatar and Saudi Arabia](../../generated_pages/incidents/I00066.md) | _"In the early hours of 24 May 2017, a news story appeared on the website of Qatar's official news agency, QNA, reporting that the country's emir, Sheikh Tamim bin Hamad al-Thani, had made an astonishing speech."_

_"[…]_

_"Qatar claimed that the QNA had been hacked. And they said the hack was designed to deliberately spread fake news about the country's leader and its foreign policies. The Qataris specifically blamed UAE, an allegation later repeated by a Washington Post report which cited US intelligence sources. The UAE categorically denied those reports._

_"But the story of the emir's speech unleashed a media free-for-all. Within minutes, Saudi and UAE-owned TV networks - Al Arabiya and Sky News Arabia - picked up on the comments attributed to al-Thani. Both networks accused Qatar of funding extremist groups and of destabilising the region."_

This incident demonstrates how threat actors used a compromised website to lend an inauthentic narrative a level of credibility which caused significant political fallout (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.004: Website, T0150.005: Compromised). | -| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | -| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | -| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [I00066 The online war between Qatar and Saudi Arabia](../../generated_pages/incidents/I00066.md) | _"In the early hours of 24 May 2017, a news story appeared on the website of Qatar's official news agency, QNA, reporting that the country's emir, Sheikh Tamim bin Hamad al-Thani, had made an astonishing speech."_

_"[…]_

_"Qatar claimed that the QNA had been hacked. And they said the hack was designed to deliberately spread fake news about the country's leader and its foreign policies. The Qataris specifically blamed UAE, an allegation later repeated by a Washington Post report which cited US intelligence sources. The UAE categorically denied those reports._

_"But the story of the emir's speech unleashed a media free-for-all. Within minutes, Saudi and UAE-owned TV networks - Al Arabiya and Sky News Arabia - picked up on the comments attributed to al-Thani. Both networks accused Qatar of funding extremist groups and of destabilising the region."_

This incident demonstrates how threat actors used a compromised website to lend an inauthentic narrative a level of credibility which caused significant political fallout (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.004: Website Asset, T0150.005: Compromised Asset). | +| [I00071 Russia-aligned hacktivists stir up anti-Ukrainian sentiments in Poland](../../generated_pages/incidents/I00071.md) | “The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.

“In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.

“The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.

“Walusiak’s Facebook account is also no longer accessible. Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.

“The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”


In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform).

This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. | +| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of volunteer moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).
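Posts like the quoted "sent back to you doubled" tweet can be triaged by extracting candidate wallet addresses for blockchain follow-up. A sketch with illustrative regexes for the two common Bitcoin address formats; matches are leads for further lookups, not confirmed wallets.

```python
# Sketch: pull candidate Bitcoin addresses (T0148.009: Cryptocurrency
# Wallet) out of scam post text. Legacy base58 and bech32 forms only.
import re

BTC_ADDRESS = re.compile(
    r"\b(?:[13][a-km-zA-HJ-NP-Z1-9]{25,34}|bc1[a-z0-9]{25,59})\b"
)

post = ("all bitcoin sent to our address below will be sent back to you "
        "doubled! bc1qar0srrr7xfkvy5l643lydnw9re59gtzzwf5mdq")  # illustrative address

for address in BTC_ADDRESS.findall(post):
    print("candidate wallet:", address)
```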

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | diff --git a/generated_pages/techniques/T0150.006.md index 1976579..bf430f7 100644 --- a/generated_pages/techniques/T0150.006.md +++ b/generated_pages/techniques/T0150.006.md @@ -1,4 +1,4 @@ -# Technique T0150.006: Purchased +# Technique T0150.006: Purchased Asset * **Summary**: A Purchased Asset is an asset which actors paid for the ownership of.

For example, threat actors have been observed selling compromised social media accounts on dark web marketplaces, which can be used to disguise operation activity. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.

Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:

[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.

Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).

To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains.

The relatively low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.


Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). | diff --git a/generated_pages/techniques/T0150.007.md b/generated_pages/techniques/T0150.007.md index 6033bbb..c9988ae 100644 --- a/generated_pages/techniques/T0150.007.md +++ b/generated_pages/techniques/T0150.007.md @@ -1,4 +1,4 @@ -# Technique T0150.007: Rented +# Technique T0150.007: Rented Asset * **Summary**: A Rented Asset is an asset which actors are temporarily renting or subscribing to.

For example, threat actors have been observed renting temporary access to legitimate accounts on online platforms in order to disguise operation activity. @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00064 Tinder nightmares: the promise and peril of political bots](../../generated_pages/incidents/I00064.md) | “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]

“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”


In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097.109: Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.007: Rented, T0151.017: Dating Platform). | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). | +| [I00064 Tinder nightmares: the promise and peril of political bots](../../generated_pages/incidents/I00064.md) | “In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]

“The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”


In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097.109: Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”. The report touches upon how actors gained access to twitter accounts, and what personas they presented:

Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.

[...]

Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.


Actors participating in this operation rented verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and given updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). | diff --git a/generated_pages/techniques/T0150.008.md index 522619a..f283df4 100644 --- a/generated_pages/techniques/T0150.008.md +++ b/generated_pages/techniques/T0150.008.md @@ -1,4 +1,4 @@ -# Technique T0150.008: Bulk Created +# Technique T0150.008: Bulk Created Asset * **Summary**: A Bulk Created Asset is an asset which was created alongside many other instances of the same asset.

Actors have been observed bulk creating Accounts on Social Media Platforms such as Facebook. Indicators of bulk asset creation include assets’ creation dates, their naming conventions, their configuration (e.g. templated personas, visually similar profile pictures), or their activity (e.g. post timings, narratives posted); a minimal detection sketch follows below. diff --git a/generated_pages/techniques/T0151.001.md index 54bde42..7d57e57 100644 --- a/generated_pages/techniques/T0151.001.md +++ b/generated_pages/techniques/T0151.001.md @@ -8,13 +8,13 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00097 Report: Not Just Algorithms](../../generated_pages/incidents/I00097.md) | This report explores the role of four systems (recommender systems, content moderation systems, ad approval systems and ad management systems) in creating risks around eating disorders.
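These indicators lend themselves to simple automated triage. The sketch below is a minimal, hypothetical illustration — the record format, names, and thresholds are invented, not part of the DISARM framework — flagging accounts that were created in a tight burst alongside several others and whose handles end in long digit runs, two of the indicators named above.

```python
import re
from datetime import datetime, timedelta

# Hypothetical input records; real platforms expose this data differently.
accounts = [
    {"name": "maria_lopez84721", "created": "2021-03-02T14:01:05"},
    {"name": "maria_lopez84722", "created": "2021-03-02T14:01:41"},
    {"name": "john_smith84723",  "created": "2021-03-02T14:02:10"},
    {"name": "long_time_user",   "created": "2014-06-19T09:12:00"},
]

TEMPLATED = re.compile(r"\d{4,}$")   # handle ends in a long digit run
WINDOW = timedelta(minutes=5)        # "burst" size is a guessed threshold

def bulk_creation_suspects(accounts, min_cluster=3):
    """Flag accounts created in a burst and carrying templated names."""
    times = {a["name"]: datetime.fromisoformat(a["created"]) for a in accounts}
    suspects = []
    for a in accounts:
        # All accounts created within WINDOW of this one, itself included.
        burst = [b for b in accounts
                 if abs(times[b["name"]] - times[a["name"]]) <= WINDOW]
        if len(burst) >= min_cluster and TEMPLATED.search(a["name"]):
            suspects.append(a["name"])
    return suspects

print(bulk_creation_suspects(accounts))
# -> ['maria_lopez84721', 'maria_lopez84722', 'john_smith84723']
```

A real investigation would combine several weak signals (profile-image similarity, posting cadence, shared narratives) rather than rely on any single rule like this one.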

[...]

Content recommender systems can create risks. We created and primed ‘fake’ accounts for 16-year old Australians and found that some recommender systems will promote pro-eating disorder content to children.

Specifically: On TikTok, 0% of the content recommended was classified as pro-eating disorder content; On Instagram, 23% of the content recommended was classified as pro-eating disorder content; On X, 67% of content recommended was classified as pro-eating disorder content (and disturbingly, another 13% displayed self-harm imagery).


Content recommendation algorithms developed by Instagram (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm) and X (T0151.008: Microblogging Platform, T0153.006: Content Recommendation Algorithm) promoted harmful content to an account presenting as a 16 year old Australian. | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | -| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s Algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only * after * things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [I00115 How Facebook shapes your feed](../../generated_pages/incidents/I00115.md) | This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
| -| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called “OuterBanks2023,” where somebody who goes by “Alexandr” sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had posted AI-generated images of beachside scenes, changed to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0097.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only * after * things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [I00115 How Facebook shapes your feed](../../generated_pages/incidents/I00115.md) | This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
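The weighting dynamic described in this report can be made concrete with a toy scoring function. The sketch below is illustrative only: the 5x reaction-emoji multiplier is the figure reported in the company documents, while every other weight, field name, and number is an invented placeholder, not Facebook’s actual model.

```python
# Toy engagement-weighted ranking in the spirit of the reporting above.
# Only the 5x reaction weight comes from the documents cited; all other
# weights and field names are hypothetical stand-ins.
WEIGHTS = {
    "likes": 1.0,
    "reactions": 5.0,   # angry/love/etc. reportedly counted ~5x a like
    "comments": 15.0,   # stand-in for the "meaningful social interactions" boost
    "reshares": 30.0,
}

def engagement_score(post: dict) -> float:
    """Linear score used to rank posts in a feed."""
    return sum(WEIGHTS[k] * post.get(k, 0) for k in WEIGHTS)

calm_post = {"likes": 120, "reactions": 4, "comments": 6, "reshares": 2}
angry_post = {"likes": 40, "reactions": 90, "comments": 55, "reshares": 12}

print(engagement_score(calm_post))   # 290.0
print(engagement_score(angry_post))  # 1675.0
```

Even with fewer likes, the comment- and reaction-heavy post dominates the ranking — the polarising feedback loop the internal documents describe.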
| +| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | diff --git a/generated_pages/techniques/T0151.002.md b/generated_pages/techniques/T0151.002.md index e8a3d9d..604e0b4 100644 --- a/generated_pages/techniques/T0151.002.md +++ b/generated_pages/techniques/T0151.002.md @@ -7,10 +7,10 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). | -| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s Algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only * after * things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). | +| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| | [I00123 The Extreme Right on Steam](../../generated_pages/incidents/I00123.md) | Analysis of communities on the gaming platform Steam showed that groups who are known to have engaged in acts of terrorism used Steam to host social communities (T0152.009: Software Delivery Platform, T0151.002: Online Community Group):

The first is a Finnish-language group which was set up to promote the Nordic Resistance Movement (NRM). NRM are the only group in the sample examined by ISD known to have engaged in terrorist attacks. Swedish members of the group conducted a series of bombings in Gothenburg in 2016 and 2017, and several Finnish members are under investigation in relation to both violent attacks and murder.

The NRM Steam group does not host content related to gaming, and instead seems to act as a hub for the movement. The group’s overview section contains a link to the official NRM website, and users are encouraged to find like-minded people to join the group. The group is relatively small, with 87 members, but at the time of writing, it appeared to be active and in use. Interestingly, although the group is in Finnish language, it has members in common with the English language channels identified in this analysis. This suggests that Steam may help facilitate international exchange between right-wing extremists.
ISD conducted an investigation into the usage of social groups on Steam. Steam is an online platform used to buy and sell digital games, and includes the Steam community feature, which “allows users to find friends and join groups and discussion forums, while also offering in-game voice and text chat”. Actors have used Steam’s social capabilities to enable online harm campaigns:

One function of these Steam groups is the organisation of ‘raids’ – coordinated trolling activity against their political opponents. An example of this can be seen in a white power music group sharing a link to an Israeli Steam group, encouraging other members to “help me raid this juden [German word for Jew] group”. The comments section of said target group show that neo-Nazi and antisemitic comments were consistently posted in the group just two minutes after the instruction had been posted in the extremist group, highlighting the swiftness with which racially motivated harassment can be directed online.

Threat actors used social groups on Steam to organise harassment of targets (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0049.005: Conduct Swarming, T0048: Harass). ISD’s investigation also found Steam groups directing members to platforms elsewhere in the extreme right-wing online ecosystem:

A number of groups were observed encouraging members to join conversations on outside platforms. These include links to Telegram channels connected to white supremacist marches, and media outlets, forums and Discord servers run by neo-Nazis.

[...]

This off-ramping activity demonstrates how rather than sitting in isolation, Steam fits into the wider extreme right wing online ecosystem, with Steam groups acting as hubs for communities and organizations which span multiple platforms. Accordingly, although the platform appears to fill a specific role in the building and strengthening of communities with similar hobbies and interests, it is suggested that analysis seeking to determine the risk of these communities should focus on their activity across platforms


Social groups on Steam were used to drive new people to other neo-Nazi-controlled community assets (T0122: Direct Users to Alternative Platforms, T0152.009: Software Delivery Platform, T0151.002: Online Community Group). | -| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | +| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | diff --git a/generated_pages/techniques/T0151.003.md b/generated_pages/techniques/T0151.003.md index 70d081f..813934a 100644 --- a/generated_pages/techniques/T0151.003.md +++ b/generated_pages/techniques/T0151.003.md @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | +| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.

A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.

But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.

Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.

But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.

The account links to an Etsy page for a business called "OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.


A Facebook page which presented itself as being associated with North Carolina, and which had previously posted AI-generated images of nature scenes, changed to posting AI-generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis).

The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI-generated images (T0146: Account Asset, T0148.007: eCommerce Platform). | +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0097.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | diff --git a/generated_pages/techniques/T0151.004.md b/generated_pages/techniques/T0151.004.md index e63ed04..c76fbf2 100644 --- a/generated_pages/techniques/T0151.004.md +++ b/generated_pages/techniques/T0151.004.md @@ -7,10 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00006 Columbian Chemicals](../../generated_pages/incidents/I00006.md) | Use SMS/text messages | -| [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”

In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). | -| [I00084 Russia turns its diplomats into disinformation warriors](../../generated_pages/incidents/I00084.md) | “[Russia’s social media] reach isn't the same as Russian state media, but they are trying to recreate what RT and Sputnik had done," said one EU official involved in tracking Russian disinformation. "It's a coordinated effort that goes beyond social media and involves specific websites."

“Central to that wider online playbook is a Telegram channel called Warfakes and an affiliated website. Since the beginning of the conflict, that social media channel has garnered more than 725,000 members and repeatedly shares alleged fact-checks aimed at debunking Ukrainian narratives, using language similar to Western-style fact-checking outlets.”


In this example a Telegram channel (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) was established which presented itself as a source of fact checks (T0097.203: Fact Checking Organisation Persona). | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”

In this example attackers created an account on WhatsApp which impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). | +| [I00084 Russia turns its diplomats into disinformation warriors](../../generated_pages/incidents/I00084.md) | “[Russia’s social media] reach isn't the same as Russian state media, but they are trying to recreate what RT and Sputnik had done," said one EU official involved in tracking Russian disinformation. "It's a coordinated effort that goes beyond social media and involves specific websites."

“Central to that wider online playbook is a Telegram channel called Warfakes and an affiliated website. Since the beginning of the conflict, that social media channel has garnered more than 725,000 members and repeatedly shares alleged fact-checks aimed at debunking Ukrainian narratives, using language similar to Western-style fact-checking outlets.”


In this example a Telegram channel (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) was established which presented itself as a source of fact checks (T0097.203: Fact Checking Organisation Persona). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | | [I00122 The Extreme Right on Discord](../../generated_pages/incidents/I00122.md) | Discord is an example of a T0151.004: Chat Platform, which allows users to create their own T0151.005: Chat Community Server. The Institute for Strategic Dialogue (ISD) conducted an investigation into the extreme right’s usage of Discord servers:

Discord is a free service accessible via phones and computers. It allows users to talk to each other in real time via voice, text or video chat and emerged in 2015 as a platform designed to assist gamers in communicating with each other while playing video games. The popularity of the platform has surged in recent years, and it is currently estimated to have 140 million monthly active users.

Chatrooms – known as servers - in the platform can be created by anyone, and they are used for a range of purposes that extend far beyond gaming. Such purposes include the discussion of extreme right-wing ideologies and the planning of offline extremist activity. Ahead of the far-right Unite the Right rally in Charlottesville, Virginia, in August 2017, organisers used Discord to plan and promote events and posted swastikas and praised Hitler in chat rooms with names like “National Socialist Army” and “Führer’s Gas Chamber”.


In this example a Discord server was used to organise the 2017 Charlottesville Unite the Right rally. Chat rooms in the server were used to discuss different topics related to the rally (T0057: Organise Events, T0126.002: Facilitate Logistics or Support for Attendance, T0151.004: Chat Platform, T0151.005: Chat Community Server, T0151.006: Chat Room).

Another primary activity engaged in the servers analysed are raids against other servers associated with political opponents, and in particular those that appear to be pro-LGBTQ. Raids are a phenomenon in which a small group of users will join a Discord server with the sole purpose of spamming the host with offensive or incendiary messages and content with the aim of upsetting local users or having the host server banned by Discord. On two servers examined here, raiding was their primary function.

Among servers devoted to this activity, specific channels were often created to host links to servers that users were then encouraged to raid. Users are encouraged to be as offensive as possible with the aim of upsetting or angering users on the raided server, and channels often had content banks of offensive memes and content to be shared on raided servers.

The use of raids demonstrates the gamified nature of extremist activity on Discord, where use of the platform and harassment of political opponents is itself turned into a type of real-life video game designed to strengthen in-group affiliation. This combined with the broader extremist activity identified in these channels suggests that the combative activity of raiding could provide a pathway for younger people to become more engaged with extremist activity.


Discord servers were used by members of the extreme right to coordinate harassment of targeted communities (T0048: Harass, T0049.005: Conduct Swarming, T0151.004: Chat Platform, T0151.005: Chat Community Server). | diff --git a/generated_pages/techniques/T0151.007.md b/generated_pages/techniques/T0151.007.md index 7ea28af..f798f1b 100644 --- a/generated_pages/techniques/T0151.007.md +++ b/generated_pages/techniques/T0151.007.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | diff --git a/generated_pages/techniques/T0151.008.md b/generated_pages/techniques/T0151.008.md index 50e25d5..c117338 100644 --- a/generated_pages/techniques/T0151.008.md +++ b/generated_pages/techniques/T0151.008.md @@ -8,12 +8,12 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00097 Report: Not Just Algorithms](../../generated_pages/incidents/I00097.md) | This report explores the role of four systems (recommender systems, content moderation systems, ad approval systems and ad management systems) in creating risks around eating disorders.

[...]

Content recommender systems can create risks. We created and primed ‘fake’ accounts for 16-year old Australians and found that some recommender systems will promote pro-eating disorder content to children.

Specifically: On TikTok, 0% of the content recommended was classified as pro-eating disorder content; On Instagram, 23% of the content recommended was classified as pro-eating disorder content; On X, 67% of content recommended was classified as pro-eating disorder content (and disturbingly, another 13% displayed self-harm imagery).


Content recommendation algorithms developed by Instagram (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm) and X (T0151.008: Microblogging Platform, T0153.006: Content Recommendation Algorithm) promoted harmful content to an account presenting as a 16-year-old Australian. | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | -| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). | -| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:


That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | -| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | -| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [I00113 Inside the Shadowy World of Disinformation for Hire in Kenya](../../generated_pages/incidents/I00113.md) | Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:

In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.

[...]

They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.


An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.

Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). | +| [I00116 Blue-tick scammers target consumers who complain on X](../../generated_pages/incidents/I00116.md) | Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.

Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.

They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.

Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.

Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.

“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”

Thomas became suspicious and checked the X profile. “It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.


In this example a newly created paid account on X was used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). | +| [I00120 factcheckUK or fakecheckUK? Reinventing the political faction as the impartial factchecker](../../generated_pages/incidents/I00120.md) | Ahead of the 2019 UK Election during a leaders’ debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:

The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.

That is, until a few minutes into the debate.

All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.

The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCHQ” subheading]”.

The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) validation tick still after it


In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). | +| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:

[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.

Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals of Atlanta, a city over 500 miles away from Louisiana and in a different time zone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset).

A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. | +| [I00129 Teen who hacked Joe Biden and Bill Gates' Twitter accounts sentenced to three years in prison](../../generated_pages/incidents/I00129.md) | An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.

Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.

Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.

Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.


In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).

The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). | diff --git a/generated_pages/techniques/T0151.009.md b/generated_pages/techniques/T0151.009.md index 64c02d3..363a2b5 100644 --- a/generated_pages/techniques/T0151.009.md +++ b/generated_pages/techniques/T0151.009.md @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated, T0151.009: Legacy Online Forum Platform). | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | +| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated Asset, T0151.009: Legacy Online Forum Platform). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [I00118 ‘War Thunder’ players are once again leaking sensitive military technology information on a video game forum](../../generated_pages/incidents/I00118.md) | In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.

This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.

A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.

The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.

The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual.

[...]

A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.


A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). | diff --git a/generated_pages/techniques/T0151.011.md b/generated_pages/techniques/T0151.011.md index 28ec6d1..a4aa344 100644 --- a/generated_pages/techniques/T0151.011.md +++ b/generated_pages/techniques/T0151.011.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | +| [I00101 Pro-Putin Disinformation Warriors Take War of Aggression to Reddit](../../generated_pages/incidents/I00101.md) | This report looks at changes in content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:

The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.

Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.

The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”

When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.

Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”


A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). | diff --git a/generated_pages/techniques/T0151.012.md b/generated_pages/techniques/T0151.012.md index 9c4bf43..57132f2 100644 --- a/generated_pages/techniques/T0151.012.md +++ b/generated_pages/techniques/T0151.012.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | | [I00104 Macron Campaign Hit With “Massive and Coordinated” Hacking Attack](../../generated_pages/incidents/I00104.md) | A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | diff --git a/generated_pages/techniques/T0151.014.md b/generated_pages/techniques/T0151.014.md index b12b5bd..96d818d 100644 --- a/generated_pages/techniques/T0151.014.md +++ b/generated_pages/techniques/T0151.014.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | +| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | diff --git a/generated_pages/techniques/T0151.015.md b/generated_pages/techniques/T0151.015.md index 3385624..57a6b9e 100644 --- a/generated_pages/techniques/T0151.015.md +++ b/generated_pages/techniques/T0151.015.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00098 Gaming The System: How Extremists Exploit Gaming Sites And What Can Be Done To Counter Them](../../generated_pages/incidents/I00098.md) | This report looks at how extremists exploit games and gaming adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate realworld attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. | +| [I00098 Gaming The System: How Extremists Exploit Gaming Sites And What Can Be Done To Counter Them](../../generated_pages/incidents/I00098.md) | This report looks at how extremists exploit games and gaming adjacent platforms:

Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”

While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users.

A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate realworld attacks.

Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.


White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. | diff --git a/generated_pages/techniques/T0152.001.md b/generated_pages/techniques/T0152.001.md index 7127c69..97c3dae 100644 --- a/generated_pages/techniques/T0152.001.md +++ b/generated_pages/techniques/T0152.001.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | diff --git a/generated_pages/techniques/T0152.002.md b/generated_pages/techniques/T0152.002.md index 2f2f31e..c2698a8 100644 --- a/generated_pages/techniques/T0152.002.md +++ b/generated_pages/techniques/T0152.002.md @@ -1,4 +1,4 @@ -# Technique T0152.002: Blog +# Technique T0152.002: Blog Asset * **Summary**: Blogs are a collation of posts centred on a particular topic, author, or collection of authors.

Some platforms are designed to support users in hosting content online, such as Blogging Platforms like Substack, which allow users to create Blogs, but other online platforms can also be used to produce a Blog; for example, a Paid Account on X (prev Twitter) is able to post long-form text content to its timeline in the style of a blog.

Actors may create Accounts on Blogging Platforms to create a Blog, or make their own Blog on a Website. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | diff --git a/generated_pages/techniques/T0152.003.md b/generated_pages/techniques/T0152.003.md index 4615e92..216ee33 100644 --- a/generated_pages/techniques/T0152.003.md +++ b/generated_pages/techniques/T0152.003.md @@ -8,7 +8,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00107 The Lies Russia Tells Itself](../../generated_pages/incidents/I00107.md) | The Moscow firm Social Design Agency (SDA) has been attributed as being behind a Russian disinformation project known as Doppelganger:

The SDA’s deception work first surfaced in 2022, likely almost immediately after Doppelganger got off the ground. In April of that year, Meta, the parent company of Facebook and Instagram, disclosed in a quarterly report that it had removed from its platforms “a network of about 200 accounts operated from Russia.” By August 2022, German investigative journalists revealed that they had discovered forgeries of about 30 news sites, including many of the country’s biggest media outlets—Frankfurter Allgemeine, Der Spiegel, and Bild—but also Britain’s Daily Mail and France’s 20 Minutes. The sites had deceptive URLs such as www-dailymail-co-uk.dailymail.top.


As part of its work, the SDA created many websites which impersonated existing media outlets. These sites used domain impersonation tactics to increase the perceived legitimacy of their impersonations (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.003: Website Hosting Platform, T0149.003: Lookalike Domain). | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | diff --git a/generated_pages/techniques/T0152.004.md b/generated_pages/techniques/T0152.004.md index 5ddcf79..48cdb95 100644 --- a/generated_pages/techniques/T0152.004.md +++ b/generated_pages/techniques/T0152.004.md @@ -1,4 +1,4 @@ -# Technique T0152.004: Website +# Technique T0152.004: Website Asset * **Summary**: A Website is a collection of related web pages hosted on a server and accessible via a web browser. Websites have an associated Domain and can host various types of content, such as text, images, videos, and interactive features.

When a Website is fleshed out, it Presents a Persona to site visitors. For example, the Domain “bbc.co.uk/news” hosts a Website which uses the News Outlet Persona. @@ -7,10 +7,10 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0097.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | -| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | +| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented themselves as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0097.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | +| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | diff --git a/generated_pages/techniques/T0152.005.md b/generated_pages/techniques/T0152.005.md index df1bf70..74a3241 100644 --- a/generated_pages/techniques/T0152.005.md +++ b/generated_pages/techniques/T0152.005.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | | [I00104 Macron Campaign Hit With “Massive and Coordinated” Hacking Attack](../../generated_pages/incidents/I00104.md) | A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.

At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.

“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”

The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”


Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. | diff --git a/generated_pages/techniques/T0152.006.md b/generated_pages/techniques/T0152.006.md index ff8a258..a8a8e27 100644 --- a/generated_pages/techniques/T0152.006.md +++ b/generated_pages/techniques/T0152.006.md @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | diff --git a/generated_pages/techniques/T0152.009.md b/generated_pages/techniques/T0152.009.md index e4514ba..98273a7 100644 --- a/generated_pages/techniques/T0152.009.md +++ b/generated_pages/techniques/T0152.009.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod). | +| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB].

[...]

[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.


Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.

[...]

During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.

Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.


Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod Asset). | | [I00123 The Extreme Right on Steam](../../generated_pages/incidents/I00123.md) | Analysis of communities on the gaming platform Steam showed that groups who are known to have engaged in acts of terrorism used Steam to host social communities (T0152.009: Software Delivery Platform, T0151.002: Online Community Group):

The first is a Finnish-language group which was set up to promote the Nordic Resistance Movement (NRM). NRM are the only group in the sample examined by ISD known to have engaged in terrorist attacks. Swedish members of the group conducted a series of bombings in Gothenburg in 2016 and 2017, and several Finnish members are under investigation in relation to both violent attacks and murder.

The NRM Steam group does not host content related to gaming, and instead seems to act as a hub for the movement. The group’s overview section contains a link to the official NRM website, and users are encouraged to find like-minded people to join the group. The group is relatively small, with 87 members, but at the time of writing, it appeared to be active and in use. Interestingly, although the group is in Finnish language, it has members in common with the English language channels identified in this analysis. This suggests that Steam may help facilitate international exchange between right-wing extremists.
ISD conducted an investigation into the usage of social groups on Steam. Steam is an online platform used to buy and sell digital games, and includes the Steam community feature, which “allows users to find friends and join groups and discussion forums, while also offering in-game voice and text chat”. Actors have used Steam’s social capabilities to enable online harm campaigns:

One function of these Steam groups is the organisation of ‘raids’ – coordinated trolling activity against their political opponents. An example of this can be seen in a white power music group sharing a link to an Israeli Steam group, encouraging other members to “help me raid this juden [German word for Jew] group”. The comments section of said target group show that neo-Nazi and antisemitic comments were consistently posted in the group just two minutes after the instruction had been posted in the extremist group, highlighting the swiftness with which racially motivated harassment can be directed online.

Threat actors used social groups on Steam to organise harassment of targets (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0049.005: Conduct Swarming, T0048: Harass). ISD conducted an investigation into the usage of social groups on Steam. Steam is an online platform used to buy and sell digital games, and includes the Steam community feature, which “allows users to find friends and join groups and discussion forums, while also offering in-game voice and text chat”. Actors have used Steam’s social capabilities to enable online harm campaigns:

A number of groups were observed encouraging members to join conversations on outside platforms. These include links to Telegram channels connected to white supremacist marches, and media outlets, forums and Discord servers run by neo-Nazis.

[...]

This off-ramping activity demonstrates how rather than sitting in isolation, Steam fits into the wider extreme right wing online ecosystem, with Steam groups acting as hubs for communities and organizations which span multiple platforms. Accordingly, although the platform appears to fill a specific role in the building and strengthening of communities with similar hobbies and interests, it is suggested that analysis seeking to determine the risk of these communities should focus on their activity across platforms


Social Groups on Steam were used to drive new people to other neo-Nazi controlled community assets (T0122: Direct Users to Alternative Platforms, T0152.009: Software Delivery Platform, T0151.002: Online Community Group). | diff --git a/generated_pages/techniques/T0152.010.md b/generated_pages/techniques/T0152.010.md index 698ce9d..196ff5a 100644 --- a/generated_pages/techniques/T0152.010.md +++ b/generated_pages/techniques/T0152.010.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | +| [I00102 Ignore The Poway Synagogue Shooter’s Manifesto: Pay Attention To 8chan’s /pol/ Board](../../generated_pages/incidents/I00102.md) | On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.

Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.

This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.

The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.


Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).

The report looks deeper into 8chan’s /pol/ board:

8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.

[...]

I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.

This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.


Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).

When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.

When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.

This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”

In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”


Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). | diff --git a/generated_pages/techniques/T0152.012.md b/generated_pages/techniques/T0152.012.md index 20289bc..b14bc3e 100644 --- a/generated_pages/techniques/T0152.012.md +++ b/generated_pages/techniques/T0152.012.md @@ -7,9 +7,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:



More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | -| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | -| [I00112 Patreon allows disinformation and conspiracies to be monetised in Spain](../../generated_pages/incidents/I00112.md) | In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by EU DisinfoLab in the mentioned previous report.

Some creators display links that redirect users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). | +| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:

More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.

A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).

On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.

We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”


The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).

On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”

An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). | +| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | +| [I00112 Patreon allows disinformation and conspiracies to be monetised in Spain](../../generated_pages/incidents/I00112.md) | In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts goes against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:

In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by EU DisinfoLab in the mentioned previous report.

Some creators display links that redirect users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.

Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.

[...]

Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.

Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.


In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).

Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). | diff --git a/generated_pages/techniques/T0153.001.md b/generated_pages/techniques/T0153.001.md index 2cdbbd8..9a1ecbb 100644 --- a/generated_pages/techniques/T0153.001.md +++ b/generated_pages/techniques/T0153.001.md @@ -7,8 +7,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). | -| [I00121 Operation Overload: how pro-Russian actors flood newsrooms with fake content and seek to divert their efforts](../../generated_pages/incidents/I00121.md) | The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.


In this example, threat actors used Gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | +| [I00119 Independent journalist publishes Trump campaign document hacked by Iran despite election interference concerns](../../generated_pages/incidents/I00119.md) | An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.

The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.

For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.

But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.

[...]

Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.

NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.

One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. The Vance file appears to be the one Klippenstein hosts on his site.


In this example, hackers attributed to Iran used the Robert persona to email hacked documents to journalists (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).

The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). | +| [I00121 Operation Overload: how pro-Russian actors flood newsrooms with fake content and seek to divert their efforts](../../generated_pages/incidents/I00121.md) | The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik.

We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.

[...]

The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.

[...]

All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.
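As a minimal sketch of the header check described in the excerpt above (the message, addresses, and IP below are invented placeholders, not data from the operation):

```python
# Sketch: inspect an email's Received headers for an originating client IP.
# Webmail interfaces such as Gmail's typically add no client-side hop, so the
# earliest Received header points at the provider's own infrastructure rather
# than the sender's IP address.
import re
from email import message_from_string

raw_email = """Received: from mail-sor-f41.google.com ([209.85.220.41])
From: Concerned Citizen <sender@gmail.com>
Subject: Please verify this story

Can you fact-check the claims at these links?
"""  # placeholder message for illustration

msg = message_from_string(raw_email)
for hop in msg.get_all("Received", []):
    ips = re.findall(r"\[(\d{1,3}(?:\.\d{1,3}){3})\]", hop)
    print(hop.split()[1], ips or "no client IP recorded")
```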


In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. | diff --git a/generated_pages/techniques/T0153.003.md b/generated_pages/techniques/T0153.003.md index 32660ec..a4f94c9 100644 --- a/generated_pages/techniques/T0153.003.md +++ b/generated_pages/techniques/T0153.003.md @@ -1,4 +1,4 @@ -# Technique T0153.003: Shortened Link +# Technique T0153.003: Shortened Link Asset * **Summary**: A Shortened Link is a custom URL which is typically a shortened version of another URL. diff --git a/generated_pages/techniques/T0153.004.md b/generated_pages/techniques/T0153.004.md index ebc10d6..965fcff 100644 --- a/generated_pages/techniques/T0153.004.md +++ b/generated_pages/techniques/T0153.004.md @@ -1,4 +1,4 @@ -# Technique T0153.004: QR Code +# Technique T0153.004: QR Code Asset @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). | +| [I00128 #TrollTracker: Outward Influence Operation From Iran](../../generated_pages/incidents/I00128.md) | [Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.

The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.

Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.
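As a minimal sketch of the embedding technique described above, encoding a URL as a QR code image that could be composited into a post graphic (assumes the third-party `qrcode` package with Pillow support is installed; the URL and filename are placeholders):

```python
# Sketch: turn a URL into a QR code image. Scanning the image with a phone
# camera opens the encoded URL, which is how the page directed users to the
# externally hosted document.
import qrcode

img = qrcode.make("https://example.com/archived-report.pdf")  # placeholder URL
img.save("embedded-qr.png")
```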


In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). | diff --git a/generated_pages/techniques/T0153.005.md b/generated_pages/techniques/T0153.005.md index b54526f..80100ae 100644 --- a/generated_pages/techniques/T0153.005.md +++ b/generated_pages/techniques/T0153.005.md @@ -8,8 +8,8 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00097 Report: Not Just Algorithms](../../generated_pages/incidents/I00097.md) | This report explores the role of four systems (recommender systems, content moderation systems, ad approval systems and ad management systems) in creating risks around eating disorders.

[...]

Ad approval systems can create risks. We created 12 ‘fake’ ads that promoted dangerous weight loss techniques and behaviours. We tested to see if these ads would be approved to run, and they were. This means dangerous behaviours can be promoted in paid-for advertising. (Requests to run ads were withdrawn after approval or rejection, so no dangerous advertising was published as a result of this experiment.)

Specifically: On TikTok, 100% of the ads were approved to run; On Facebook, 83% of the ads were approved to run; On Google, 75% of the ads were approved to run.

Ad management systems can create risks. We investigated how platforms allow advertisers to target users, and found that it is possible to target people who may be interested in pro-eating disorder content.

Specifically: On TikTok: End-users who interact with pro-eating disorder content on TikTok, download advertisers’ eating disorder apps or visit their websites can be targeted; On Meta: End-users who interact with pro-eating disorder content on Meta, download advertisers’ eating disorder apps or visit their websites can be targeted; On X: End-users who follow pro-eating disorder accounts, or ‘look’ like them, can be targeted; On Google: End-users who search specific words or combinations of words (including pro-eating disorder words), watch pro-eating disorder YouTube channels and probably those who download eating disorder and mental health apps can be targeted.


Advertising platforms managed by TikTok, Facebook, and Google approved adverts to be displayed on their platforms. These platforms enabled users to deliver targeted advertising to potentially vulnerable platform users (T0018: Purchase Targeted Advertisements, T0153.005: Online Advertising Platform). | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only * after * things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only * after * things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| diff --git a/generated_pages/techniques/T0153.006.md b/generated_pages/techniques/T0153.006.md index af1990d..612bf7a 100644 --- a/generated_pages/techniques/T0153.006.md +++ b/generated_pages/techniques/T0153.006.md @@ -8,9 +8,9 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | | [I00097 Report: Not Just Algorithms](../../generated_pages/incidents/I00097.md) | This report explores the role of four systems (recommender systems, content moderation systems, ad approval systems and ad management systems) in creating risks around eating disorders.

[...]

Content recommender systems can create risks. We created and primed ‘fake’ accounts for 16-year old Australians and found that some recommender systems will promote pro-eating disorder content to children.

Specifically: On TikTok, 0% of the content recommended was classified as pro-eating disorder content; On Instagram, 23% of the content recommended was classified as pro-eating disorder content; On X, 67% of content recommended was classified as pro-eating disorder content (and disturbingly, another 13% displayed self-harm imagery).


Content recommendation algorithms developed by Instagram (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm) and X (T0151.008: Microblogging Platform, T0153.006: Content Recommendation Algorithm) promoted harmful content to an account presenting as a 16-year-old Australian. | -| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). | -| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only * after * things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| -| [I00115 How Facebook shapes your feed](../../generated_pages/incidents/I00115.md) | This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.
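A toy sketch of what such reaction weighting implies for ranking: the five-times emoji multiplier comes from the excerpt above, while the remaining signal names and weights are invented for illustration only.

```python
# Sketch: reaction-weighted engagement scoring. Only the 5x emoji weight is
# reported in the documents; the other values here are assumptions.
WEIGHTS = {"like": 1, "love": 5, "haha": 5, "angry": 5, "comment": 15}

def engagement_score(counts: dict) -> int:
    return sum(WEIGHTS.get(signal, 0) * n for signal, n in counts.items())

calm_post = {"like": 200, "comment": 3}                 # widely liked, little discussion
angry_post = {"like": 40, "angry": 60, "comment": 25}   # divisive, argumentative
print(engagement_score(calm_post), engagement_score(angry_post))  # 245 vs 715
```

Under these assumed weights the divisive post outranks the better-liked one, matching the report's observation that anger-provoking posts rose in the feed.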

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
| +| [I00108 How you thought you support the animals and you ended up funding white supremacists](../../generated_pages/incidents/I00108.md) | This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:

Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages

[...]

This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.

[...]

Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.

Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos.


Suavelos created a variety of pages on Facebook which presented as centring on prosocial causes. Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).

Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). | +| [I00114 ‘Carol’s Journey’: What Facebook knew about how it radicalized users](../../generated_pages/incidents/I00114.md) | This report examines internal Facebook communications which reveal employees’ concerns about how the platform’s algorithm was recommending users join extremist conspiracy groups.

In summer 2019, a new Facebook user named Carol Smith signed up for the platform, describing herself as a politically conservative mother from Wilmington, North Carolina. Smith’s account indicated an interest in politics, parenting and Christianity and followed a few of her favorite brands, including Fox News and then-President Donald Trump.

Though Smith had never expressed interest in conspiracy theories, in just two days Facebook was recommending she join groups dedicated to QAnon, a sprawling and baseless conspiracy theory and movement that claimed Trump was secretly saving the world from a cabal of pedophiles and Satanists.

Smith didn’t follow the recommended QAnon groups, but whatever algorithm Facebook was using to determine how she should engage with the platform pushed ahead just the same. Within one week, Smith’s feed was full of groups and pages that had violated Facebook’s own rules, including those against hate speech and disinformation.

Smith wasn’t a real person. A researcher employed by Facebook invented the account, along with those of other fictitious “test users” in 2019 and 2020, as part of an experiment in studying the platform’s role in misinforming and polarizing users through its recommendations systems.

That researcher said Smith’s Facebook experience was “a barrage of extreme, conspiratorial, and graphic content.”


Facebook’s algorithm suggested users join groups which supported the QAnon movement (T0151.001: Social Media Platform, T0151.002: Online Community Group, T0153.006: Content Recommendation Algorithm, T0097.208: Social Cause Persona).

Further investigation by Facebook uncovered that its advertising platform had been used to promote QAnon narratives (T0146: Account Asset, T0114: Deliver Ads, T0153.005: Online Advertising Platform):

For years, company researchers had been running experiments like Carol Smith’s to gauge the platform’s hand in radicalizing users, according to the documents seen by NBC News.

This internal work repeatedly found that recommendation tools pushed users into extremist groups, findings that helped inform policy changes and tweaks to recommendations and news feed rankings. Those rankings are a tentacled, ever-evolving system widely known as “the algorithm” that pushes content to users. But the research at that time stopped well short of inspiring any movement to change the groups and pages themselves.

That reluctance was indicative of “a pattern at Facebook,” Haugen told reporters this month. “They want the shortest path between their current policies and any action.”

[...]

By summer 2020, Facebook was hosting thousands of private QAnon groups and pages, with millions of members and followers, according to an unreleased internal investigation.

A year after the FBI designated QAnon as a potential domestic terrorist threat in the wake of standoffs, alleged planned kidnappings, harassment campaigns and shootings, Facebook labeled QAnon a “Violence Inciting Conspiracy Network” and banned it from the platform, along with militias and other violent social movements. A small team working across several of Facebook’s departments found its platforms had hosted hundreds of ads on Facebook and Instagram worth thousands of dollars and millions of views, “praising, supporting, or representing” the conspiracy theory.

[...]

For many employees inside Facebook, the enforcement came too late, according to posts left on Workplace, the company’s internal message board.

“We’ve known for over a year now that our recommendation systems can very quickly lead users down the path to conspiracy theories and groups,” one integrity researcher, whose name had been redacted, wrote in a post announcing she was leaving the company. “This fringe group has grown to national prominence, with QAnon congressional candidates and QAnon hashtags and groups trending in the mainstream. We were willing to act only *after* things had spiraled into a dire state.”

While Facebook’s ban initially appeared effective, a problem remained: The removal of groups and pages didn’t wipe out QAnon’s most extreme followers, who continued to organize on the platform.

“There was enough evidence to raise red flags in the expert community that Facebook and other platforms failed to address QAnon’s violent extremist dimension,” said Marc-André Argentino, a research fellow at King’s College London’s International Centre for the Study of Radicalisation, who has extensively studied QAnon.

Believers simply rebranded as anti-child-trafficking groups or migrated to other communities, including those around the anti-vaccine movement.

[...]

These conspiracy groups had become the fastest-growing groups on Facebook, according to the report, but Facebook wasn’t able to control their “meteoric growth,” the researchers wrote, “because we were looking at each entity individually, rather than as a cohesive movement.” A Facebook spokesperson told BuzzFeed News it took many steps to limit election misinformation but that it was unable to catch everything.
| +| [I00115 How Facebook shapes your feed](../../generated_pages/incidents/I00115.md) | This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):

In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.

Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.

In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.

Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.

The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.

[...]

Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.

One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.
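To make the ranking mechanics concrete, here is a toy sketch of an engagement-weighted scorer in Python — an illustration of the approach described above, not Facebook’s actual system. Only the 5x weight for reaction emoji relative to a “like” comes from the reporting; the comment and share weights are hypothetical.

```python
from dataclasses import dataclass

@dataclass
class Post:
    likes: int
    reactions: int  # reaction emoji, including "angry"
    comments: int
    shares: int

def engagement_score(post: Post) -> float:
    """Score a post by weighted engagement signals; higher scores surface first."""
    return (
        1.0 * post.likes
        + 5.0 * post.reactions   # reported weight: 5x a "like" (from 2017, per the documents)
        + 15.0 * post.comments   # hypothetical: comments weighted heavily under "meaningful social interactions"
        + 30.0 * post.shares     # hypothetical
    )

posts = [Post(likes=120, reactions=3, comments=4, shares=1),
         Post(likes=40, reactions=60, comments=85, shares=20)]
feed = sorted(posts, key=engagement_score, reverse=True)
# The post provoking more reactions and comments ranks first, illustrating
# how engagement weighting can favour divisive content.
```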
| diff --git a/generated_pages/techniques/T0154.002.md b/generated_pages/techniques/T0154.002.md index 39cd744..82f6ab9 100644 --- a/generated_pages/techniques/T0154.002.md +++ b/generated_pages/techniques/T0154.002.md @@ -7,10 +7,10 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | -| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | +| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:

On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.

Here Spamouflage used an account on YouTube to post AI-generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).

Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.

Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). | +| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | | [I00100 Why ThisPersonDoesNotExist (and its copycats) need to be restricted](../../generated_pages/incidents/I00100.md) | You might have heard about the recent viral sensation, ThisPersonDoesNotExist.com, a website, launched two weeks ago, that uses Nvidia’s publicly available artificial intelligence technology to draw an invented, photo-realistic human being with each refresh. The tech is impressive and artistically evocative. It’s also irresponsible and needs to be restricted immediately.

[...]

Prior to this technology, scammers faced three major risks when using fake photos. Each of these risks had the potential to put them out of business, or in jail.

Risk #1: Someone recognizes the photo. While the odds of this are long-shot, it does happen.

Risk #2: Someone reverse image searches the photo with a service like TinEye or Google Image Search and finds that it’s been posted elsewhere. Reverse image search is one of the top anti-fraud measures recommended by consumer protection advocates.

Risk #3: If the crime is successful, law enforcement uses the fake photo to figure out the scammer’s identity after the fact. Perhaps the scammer used an old classmate’s photo. Perhaps their personal account follows the Instagram member they pilfered. And so on: people make mistakes.

The problem with AI-generated photos is that they carry none of these risks. No one will recognize a human who’s never existed before. Google Image Search will return 0 results, possibly instilling a false sense of security in the searcher. And AI-generated photos don’t give law enforcement much to work with.


ThisPersonDoesNotExist is an online platform which, when visited, produces AI-generated images of people’s faces (T0146.006: Open Access Platform, T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). | -| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). | +| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | “I seriously don't understand why I have to constantly put up with these dumbasses here every day.”

So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.

The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.

The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.

[...]

But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.

[...]

[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.

And they believed they knew who made the fake.

Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.

He was arrested at the airport, where police say he was planning to fly to Houston, Texas.

Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. They also allege there had been “work performance challenges” and his contract was likely not to be renewed.

Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.

Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.


By associating Mr Darien with the server used to email the original AI-generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: Develop AI-Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). | diff --git a/generated_pages/techniques/T0155.001.md b/generated_pages/techniques/T0155.001.md index 29c4205..41786d0 100644 --- a/generated_pages/techniques/T0155.001.md +++ b/generated_pages/techniques/T0155.001.md @@ -1,4 +1,4 @@ -# Technique T0155.001: Password Gated +# Technique T0155.001: Password Gated Asset * **Summary**: A Password Gated Asset is an online asset which requires a password to gain access.

Examples include password protected Servers set up to be a File Hosting Platform, or password protected Community Sub-Forums. diff --git a/generated_pages/techniques/T0155.002.md b/generated_pages/techniques/T0155.002.md index 8e1fb09..f7b2cb0 100644 --- a/generated_pages/techniques/T0155.002.md +++ b/generated_pages/techniques/T0155.002.md @@ -1,4 +1,4 @@ -# Technique T0155.002: Invite Gated +# Technique T0155.002: Invite Gated Asset * **Summary**: An Invite Gated Asset is an online asset which requires an existing user to invite other users for access to the asset.

Examples include Chat Groups in which Administrator Accounts are able to add or remove users, or File Hosting Platforms which allow users to invite other users to access their files. diff --git a/generated_pages/techniques/T0155.003.md b/generated_pages/techniques/T0155.003.md index 0adab9b..ffa024c 100644 --- a/generated_pages/techniques/T0155.003.md +++ b/generated_pages/techniques/T0155.003.md @@ -1,4 +1,4 @@ -# Technique T0155.003: Approval Gated +# Technique T0155.003: Approval Gated Asset * **Summary**: An Approval Gated Asset is an online asset which requires approval from Administrator Accounts for access to the asset.

Examples include Online Community Groups on Facebook, which can be configured to require questions and approval before access, and Accounts on Social Media Platforms such as Instagram, which allow users to set their accounts as visible to approved friends only. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated, T0151.009: Legacy Online Forum Platform). | +| [I00105 Gaming the System: The Use of Gaming-Adjacent Communication, Game and Mod Platforms by Extremist Actors](../../generated_pages/incidents/I00105.md) | In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:

Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.

The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.


Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated Asset, T0151.009: Legacy Online Forum Platform). | diff --git a/generated_pages/techniques/T0155.004.md b/generated_pages/techniques/T0155.004.md index 8474ce0..ebe070f 100644 --- a/generated_pages/techniques/T0155.004.md +++ b/generated_pages/techniques/T0155.004.md @@ -1,4 +1,4 @@ -# Technique T0155.004: Geoblocked +# Technique T0155.004: Geoblocked Asset * **Summary**: A Geoblocked Asset is an online asset which cannot be accessed in specific geographical locations.

Assets can be Geoblocked by choice of the platform, or can have Geoblocking mandated by regulators, and enforced through Internet Service Providers. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00072 Behind the Dutch Terror Threat Video: The St. Petersburg "Troll Factory" Connection](../../generated_pages/incidents/I00072.md) | “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”
In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain, T0150.004: Repurposed).

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website, T0155.004: Geoblocked). | +| [I00072 Behind the Dutch Terror Threat Video: The St. Petersburg "Troll Factory" Connection](../../generated_pages/incidents/I00072.md) | “The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”

[...]

“Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.

“Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”


In this example, a domain managed by an actor previously sanctioned by the US Department of Treasury has been reconfigured to redirect to another website, Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).
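A redirect of this kind can be observed programmatically; the following is a minimal sketch using Python’s requests library to walk a redirect chain (the URL is a placeholder, not the live infrastructure discussed):

```python
import requests

# Placeholder URL; substitute the domain under investigation.
response = requests.get("https://example.com/", allow_redirects=True, timeout=10)

for hop in response.history:  # intermediate 3xx responses, in order
    print(hop.status_code, hop.url, "->", hop.headers.get("Location"))
print("final:", response.status_code, response.url)
```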

Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian-speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). | diff --git a/generated_pages/techniques/T0155.005.md b/generated_pages/techniques/T0155.005.md index 203dfcc..17ceb89 100644 --- a/generated_pages/techniques/T0155.005.md +++ b/generated_pages/techniques/T0155.005.md @@ -1,4 +1,4 @@ -# Technique T0155.005: Paid Access +# Technique T0155.005: Paid Access Asset * **Summary**: A Paid Access Asset is an online asset which requires a single payment for permanent access to the asset. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). | +| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.

[...]

Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.

Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.

[...]

Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.

“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?


A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)).

Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). | diff --git a/generated_pages/techniques/T0155.006.md b/generated_pages/techniques/T0155.006.md index 6273dec..08cef6f 100644 --- a/generated_pages/techniques/T0155.006.md +++ b/generated_pages/techniques/T0155.006.md @@ -1,4 +1,4 @@ -# Technique T0155.006: Subscription Access +# Technique T0155.006: Subscription Access Asset * **Summary**: A Subscription Access Asset is an online asset which requires a continued subscription for access to the asset.

Examples include the Blogging Platform Substack, which affords Blogs hosted on their platform the ability to produce subscriber-only posts, and the Subscription Service Platform Patreon. @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). | +| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:

“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.

“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”

Patreon did not respond to VICE News’ request for comment on the report’s findings.

One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.

[The science DuByne relies on does not support his hypothesis. However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.

DuByne offers seven different membership levels for supporters, beginning at just $1 per month.

The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.

The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.


David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). | diff --git a/generated_pages/techniques/T0155.007.md b/generated_pages/techniques/T0155.007.md index 2ae84f8..a321020 100644 --- a/generated_pages/techniques/T0155.007.md +++ b/generated_pages/techniques/T0155.007.md @@ -7,6 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | +| [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | “While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”

In this example, attackers created an account on WhatsApp which impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). | diff --git a/generated_pages/techniques/T0155.md b/generated_pages/techniques/T0155.md index d862e38..437cd59 100644 --- a/generated_pages/techniques/T0155.md +++ b/generated_pages/techniques/T0155.md @@ -7,7 +7,7 @@ | Incident | Descriptions given for this incident | | -------- | -------------------- | -| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). | +| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.

In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;

Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform).

The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). | diff --git a/generated_pages/techniques_index.md b/generated_pages/techniques_index.md index bab7eab..b416265 100644 --- a/generated_pages/techniques_index.md +++ b/generated_pages/techniques_index.md @@ -1803,31 +1803,31 @@ T0146 -Account +Account Asset An Account is a user-specific profile that allows access to the features and services of an online platform, typically requiring a username and password for authentication. TA06 T0146.001 -Free Account +Free Account Asset Many online platforms allow users to create free accounts on their platform. A Free Account is an Account which does not require payment at account creation and is not subscribed to paid platform features. TA06 T0146.002 -Paid Account +Paid Account Asset Some online platforms afford accounts extra features, or other benefits, if the user pays a fee. For example, as of September 2024, content posted by a Paid Account on X (previously Twitter) is prioritised in the platform’s algorithm. TA06 T0146.003 -Verified Account +Verified Account Asset Some online platforms apply badges of verification to accounts which meet certain criteria.

On some platforms (such as dating apps) a verification badge signifies that the account has passed the platform’s identity verification checks. On some platforms (such as X (previously Twitter)) a verification badge signifies that an account has paid for the platform’s service. TA06 T0146.004 -Administrator Account +Administrator Account Asset Some accounts will have special privileges / will be in control of the Digital Community Hosting Asset; for example, the Admin of a Facebook Page, a Moderator of a Subreddit, etc. TA06 @@ -1845,37 +1845,37 @@ T0146.007 -Automated Account +Automated Account Asset An Automated Account is an account which displays automated behaviour, such as republishing or liking other accounts’ content, or publishing their own content. TA06 T0147 -Software +Software Asset Software is a program developed to run on computers or devices that helps users achieve specific goals, such as improving productivity, automating tasks, or having fun. TA06 T0147.001 -Game +Game Asset A Game is Software which has been designed for interactive entertainment, where users take on challenges set by the game’s designers.

While Online Game Platforms allow people to play with each other, Games are designed for single-player experiences. TA06 T0147.002 -Game Mod +Game Mod Asset A Game Mod is a modification which can be applied to a Game or Multiplayer Online Game to add new content or functionality to the game.

Users can Modify Games to introduce new content to the game. Modified Games can be distributed on Software Delivery Platforms such as Steam or can be distributed within the Game or Multiplayer Online Game. TA06 T0147.003 -Malware +Malware Asset Malware is Software which has been designed to cause harm or facilitate malicious behaviour on electronic devices.

DISARM recommends using the [MITRE ATT&CK Framework](https://attack.mitre.org/) to document malware types and their usage. TA06 T0147.004 -Mobile App +Mobile App Asset A Mobile App is an application which has been designed to run on mobile operating systems, such as Android or iOS.

Mobile Apps can enable access to online platforms (e.g. Facebook’s mobile app) or can provide software which users can run offline on their device. TA06 @@ -1893,7 +1893,7 @@ T0148.002 -Bank Account +Bank Account Asset A Bank Account is a financial account that allows individuals or organisations to store, manage, and access their money, typically for saving, spending, or investment purposes. TA06 @@ -1947,14 +1947,14 @@ T0149.001 -Domain -A Domain is a web address (such as “www.google.com”), used to navigate to Websites on the internet.

Domains differ from Websites in that Websites are considered to be developed web pages which host content, whereas Domains do not necessarily host public-facing web content.

A threat actor may register a new domain to bypass the old domain being blocked. +Domain Asset +A Domain is a web address (such as “google[.]com”), used to navigate to Websites on the internet.

Domains differ from Websites in that Websites are considered to be developed web pages which host content, whereas Domains do not necessarily host public-facing web content.

A threat actor may register a new domain to bypass the old domain being blocked. TA06 T0149.002 -Email Domain -An Email Domain is a Domain (such as “meta.com”) which has the ability to send emails.

Any Domain which has an MX (Mail Exchange) record and configured SMTP (Simple Mail Transfer Protocol) settings can send and receive emails, and is therefore an Email Domain. +Email Domain Asset +An Email Domain is a Domain (such as “meta[.]com”) which has the ability to send emails (e.g. from an @meta[.]com address).

Any Domain which has an MX (Mail Exchange) record and configured SMTP (Simple Mail Transfer Protocol) settings can send and receive emails, and is therefore an Email Domain. TA06 @@ -1965,31 +1965,31 @@ T0149.004 -Redirecting Domain +Redirecting Domain Asset A Redirecting Domain is a Domain which has been configured to redirect users to another Domain when visited. TA06 T0149.005 -Server +Server Asset A Server is a computer which provides resources, services, or data to other computers over a network. There are different types of servers, such as web servers (which serve web pages and applications to users), database servers (which manage and provide access to databases), and file servers (which store and share files across a network). TA06 T0149.006 -IP Address +IP Address Asset An IP Address is a unique numerical label assigned to each device connected to a computer network that uses the Internet Protocol for communication. IP addresses are commonly a part of any online infrastructure.
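Returning to the Email Domain entry above: the MX-record check it describes can be scripted. Below is a minimal sketch using the third-party dnspython package (example.com is a placeholder domain):

```python
import dns.exception
import dns.resolver  # third-party: pip install dnspython

def has_mx_records(domain: str) -> bool:
    """Return True if the domain publishes MX (Mail Exchange) records."""
    try:
        answers = dns.resolver.resolve(domain, "MX")
    except dns.exception.DNSException:  # NXDOMAIN, no answer, timeout, etc.
        return False
    return len(list(answers)) > 0

print(has_mx_records("example.com"))  # placeholder domain
```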

IP addresses can be in IPv4 dotted decimal (x.x.x.x) or IPv6 colon-separated hexadecimal (y:y:y:y:y:y:y:y) formats. TA06 T0149.007 -VPN +VPN Asset A VPN (Virtual Private Network) is a service which creates secure, encrypted connections over the internet, allowing users to transmit data safely and access network resources remotely. It masks IP Addresses, enhancing privacy and security by preventing unauthorised access and tracking. VPNs are commonly used for protecting sensitive information, bypassing geographic restrictions, and maintaining online anonymity.
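The two IP address formats noted above can be parsed and distinguished with Python’s standard-library ipaddress module; a minimal sketch, using documentation-reserved example addresses:

```python
import ipaddress

for raw in ["192.0.2.44", "2001:db8::1"]:  # reserved documentation addresses
    addr = ipaddress.ip_address(raw)  # accepts dotted-decimal IPv4 or colon-hex IPv6
    print(f"{raw} -> IPv{addr.version}")
```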

 T0149.007
-VPN
+VPN Asset
 A VPN (Virtual Private Network) is a service which creates secure, encrypted connections over the internet, allowing users to transmit data safely and access network resources remotely. It masks IP Addresses, enhancing privacy and security by preventing unauthorised access and tracking. VPNs are commonly used for protecting sensitive information, bypassing geographic restrictions, and maintaining online anonymity.

 VPNs can also allow a threat actor to appear to be located in one country while in reality being based in another. By doing so, they can try either to mis-attribute their activities to another actor or to better hide their own identity.
 TA06
 T0149.008
-Proxy IP Address
+Proxy IP Address Asset
 A Proxy IP Address allows a threat actor to mask their real IP Address by putting a layer between them and the online content they’re connecting with.

 Proxy IP Addresses can hide the connection between the threat actor and their online infrastructure.
 TA06

@@ -2007,49 +2007,49 @@
 T0150.001
-Newly Created
+Newly Created Asset
 A Newly Created Asset is an asset which has been created and used for the first time in a documented potential incident.

 For example, analysts who can identify a recent creation date for Accounts participating in the spread of a new narrative can assert that these are Newly Created Assets.

 Analysts should use Dormant if the asset was created and then lay dormant for an extended period before becoming active.
 TA06
 T0150.002
-Dormant
+Dormant Asset
 A Dormant Asset is an asset which was inactive for an extended period before being used in a documented potential incident.
 TA06
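The subtechniques above turn on an asset’s creation and first-activity dates relative to the incident. A toy Python sketch of that decision rule, read together with Pre-Existing (T0150.003, below); the 30- and 90-day thresholds and the dates are illustrative assumptions, not DISARM guidance:

    # Toy classifier for the creation/activity heuristics described above.
    # Thresholds and dates are illustrative assumptions only.
    from datetime import date

    def classify(created: date, first_active: date, incident: date) -> str:
        if (incident - created).days <= 30:        # created just before use
            return "Newly Created Asset (T0150.001)"
        if (first_active - created).days >= 90:    # long gap before activity
            return "Dormant Asset (T0150.002)"
        return "Pre-Existing Asset (T0150.003), absent signs of repurposing"

    print(classify(date(2024, 3, 1), date(2024, 3, 2), date(2024, 3, 10)))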

 T0150.003
-Pre-Existing
+Pre-Existing Asset
 Pre-Existing Assets are assets which existed before the observed incident and which have not been Repurposed; i.e. they are still being used for their original purpose.

 An example could be an Account which presented itself with a Journalist Persona prior to and during the observed potential incident.
 TA06
 T0150.004
-Repurposed
+Repurposed Asset
 Repurposed Assets are assets which have been identified as being used previously, but are now being used for different purposes, or have new Presented Personas.

 Actors have been documented compromising assets, and then repurposing them to present Inauthentic Personas as part of their operations.
 TA06
 T0150.005
-Compromised
+Compromised Asset
 A Compromised Asset is an asset which was originally created by, or belonged to, another person or organisation, but which an actor has gained access to without their consent.

 See also MITRE ATT&CK T1078: Valid Accounts.
 TA06
 T0150.006
-Purchased
+Purchased Asset
 A Purchased Asset is an asset whose ownership actors have paid for.

 For example, threat actors have been observed selling compromised social media accounts on dark web marketplaces, which can be used to disguise operation activity.
 TA06
 T0150.007
-Rented
+Rented Asset
 A Rented Asset is an asset which actors are temporarily renting or subscribing to.

 For example, threat actors have been observed renting temporary access to legitimate accounts on online platforms in order to disguise operation activity.
 TA06
 T0150.008
-Bulk Created
+Bulk Created Asset
 A Bulk Created Asset is an asset which was created alongside many other instances of the same asset.

 Actors have been observed bulk creating Accounts on Social Media Platforms such as Facebook. Indicators of bulk asset creation include creation dates, naming conventions, configuration (e.g. templated personas, visually similar profile pictures), and activity (e.g. post timings, narratives posted).
 TA06
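The indicators listed for Bulk Created Assets lend themselves to simple automated triage. A toy Python sketch over hypothetical account records, flagging handles that match a templated naming convention and share a creation date; the pattern and threshold are illustrative assumptions:

    # Flag groups of accounts whose handles match a template and which share
    # a creation date, two of the bulk-creation indicators described above.
    import re
    from collections import Counter

    accounts = [  # hypothetical records
        {"handle": "maria_1984321", "created": "2024-03-02"},
        {"handle": "maria_1984322", "created": "2024-03-02"},
        {"handle": "john_doe", "created": "2019-07-14"},
    ]

    template = re.compile(r"^[a-z]+_\d{6,}$")  # assumed naming convention
    per_day = Counter(a["created"] for a in accounts if template.match(a["handle"]))
    for day, count in per_day.items():
        if count >= 2:  # illustrative threshold
            print(f"{day}: {count} templated handles, possible bulk creation")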

@@ -2175,7 +2175,7 @@
 T0152.002
-Blog
+Blog Asset
 Blogs are a collation of posts centred on a particular topic, author, or collection of authors.

 Some platforms are designed to support users in hosting content online, such as Blogging Platforms like Substack, which allow users to create Blogs; other online platforms can also be used to produce a Blog. For example, a Paid Account on X (previously Twitter) can post long-form text content to its timeline in the style of a blog.

 Actors may create Accounts on Blogging Platforms to create a Blog, or make their own Blog on a Website.
 TA07
@@ -2187,7 +2187,7 @@
 T0152.004
-Website
+Website Asset
 A Website is a collection of related web pages hosted on a server and accessible via a web browser. Websites have an associated Domain and can host various types of content, such as text, images, videos, and interactive features.

 When a Website is fleshed out, it Presents a Persona to site visitors. For example, the Domain “bbc.co.uk/news” hosts a Website which uses the News Outlet Persona.
 TA07
@@ -2259,13 +2259,13 @@
 T0153.003
-Shortened Link
+Shortened Link Asset
 A Shortened Link is a custom URL which is typically a shortened version of another URL.
 TA07
 T0153.004
-QR Code
+QR Code Asset
 A QR Code allows people to use their smartphone cameras to open a URL.
 TA07
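Both Shortened Links (T0153.003) and Redirecting Domains (T0149.004) reach their destination through HTTP redirects, so the full chain can be expanded without opening the link in a browser. A sketch using the third-party requests package; the shortened URL is a hypothetical placeholder:

    # Follow a shortened link's redirect chain to its final destination.
    # Requires requests: pip install requests
    import requests

    resp = requests.head("https://bit.ly/example", allow_redirects=True, timeout=10)
    for hop in resp.history:                 # each intermediate redirect
        print(hop.status_code, hop.url)
    print("final destination:", resp.url)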

@@ -2313,37 +2313,37 @@
 T0155.001
-Password Gated
+Password Gated Asset
 A Password Gated Asset is an online asset which requires a password to gain access.

 Examples include password-protected Servers set up as File Hosting Platforms, or password-protected Community Sub-Forums.
 TA07
 T0155.002
-Invite Gated
+Invite Gated Asset
 An Invite Gated Asset is an online asset which requires an existing user to invite other users for access to the asset.

 Examples include Chat Groups in which Administrator Accounts are able to add or remove users, or File Hosting Platforms which allow users to invite other users to access their files.
 TA07
 T0155.003
-Approval Gated
+Approval Gated Asset
 An Approval Gated Asset is an online asset which requires approval from Administrator Accounts for access to the asset.

 Examples include Online Community Groups on Facebook, which can be configured to require questions and approval before access, and Accounts on Social Media Platforms such as Instagram, which allow users to make their accounts visible to approved friends only.
 TA07
 T0155.004
-Geoblocked
+Geoblocked Asset
 A Geoblocked Asset is an online asset which cannot be accessed in specific geographical locations.

 Assets can be Geoblocked by choice of the platform, or can have Geoblocking mandated by regulators and enforced through Internet Service Providers.
 TA07
 T0155.005
-Paid Access
+Paid Access Asset
 A Paid Access Asset is an online asset which requires a single payment for permanent access to the asset.
 TA07
 T0155.006
-Subscription Access
+Subscription Access Asset
 A Subscription Access Asset is an online asset which requires a continued subscription for access to the asset.

 Examples include the Blogging Platform Substack, which affords Blogs hosted on its platform the ability to produce subscriber-only posts, and the Subscription Service Platform Patreon.
 TA07